#![allow(clippy::uninlined_format_args)]
use openai_ergonomic::{Client, Result};
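// Content moderation example: runs a set of demonstrations (basic checks,
// category detection, custom thresholds, multilingual and batch input,
// response filtering, policy enforcement, and a full pipeline) against
// locally simulated moderation results, with notes on the real API calls.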
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
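
/// Simplified moderation verdict, mirroring the overall flag plus
/// per-category flags and scores of a Moderations API response.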
#[derive(Debug, Serialize, Deserialize)]
struct ModerationResult {
    flagged: bool,
    categories: HashMap<String, bool>,
    scores: HashMap<String, f64>,
}
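
/// Local moderation policy: per-category score thresholds plus categories
/// that are auto-rejected outright or routed to human review.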
#[derive(Debug)]
struct ModerationPolicy {
    thresholds: HashMap<String, f64>,
    auto_reject_categories: Vec<String>,
    require_human_review: Vec<String>,
}

#[tokio::main]
async fn main() -> Result<()> {
    use openai_ergonomic::Config;

    println!("=== Content Moderation Example ===\n");

    let client = if let Ok(c) = Client::from_env() {
        c.build()
    } else {
        println!("Note: OPENAI_API_KEY not found. Running in demo mode.");
        println!("Set OPENAI_API_KEY to test real API calls.\n");
        println!("To use the Moderations API:");
        println!(" let client = Client::from_env()?.build();");
        println!(" let builder = client.moderations().check(\"text to moderate\");");
        println!(" let response = client.moderations().create(builder).await?;");
        println!();
        println!("Running demonstration examples...\n");
        Client::builder(Config::builder().api_key("demo-key").build())?.build()
    };

    println!("1. Basic Moderation:");
    basic_moderation(&client);

    println!("\n2. Category Detection:");
    category_detection(&client);

    println!("\n3. Custom Thresholds:");
    custom_thresholds(&client);

    println!("\n4. Multi-language Moderation:");
    multilingual_moderation(&client);

    println!("\n5. Batch Moderation:");
    batch_moderation(&client);

    println!("\n6. Response Filtering:");
    response_filtering(&client).await?;

    println!("\n7. Policy Enforcement:");
    policy_enforcement(&client);

    println!("\n8. Moderation Pipeline:");
    moderation_pipeline(&client).await?;
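
    // Async examples (uncomment to call the real Moderations API; these are
    // the builder calls shown in the demo-mode notes printed above):
    // let builder = client.moderations().check("text to moderate");
    // let response = client.moderations().create(builder).await?;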

    Ok(())
}

/// Demonstrates checking several text inputs and reporting which are flagged
/// (results are simulated locally; see `simulate_moderation`).
fn basic_moderation(_client: &Client) {
    let test_inputs = vec![
        "This is a completely normal message about the weather.",
        "I really hate when people do that!",
        "Let's discuss this professional topic.",
    ];

    println!("Basic moderation demonstrates checking multiple text inputs.");
    println!("Note: To actually call the API, uncomment the async examples at the end of main().");
    println!();

    for input in test_inputs {
        println!("Input: '{}'", input);

        let result = simulate_moderation(input);
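        // Real call for each input (assuming the builder API shown in main's
        // demo-mode notes):
        // let builder = client.moderations().check(input);
        // let response = client.moderations().create(builder).await?;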

        println!(" Flagged: {}", result.flagged);
        if result.flagged {
            println!(" Categories: {:?}", result.categories);
        }
        println!();
    }
}

/// Lists the moderation categories reported by the API and shows per-category
/// flags and scores for a sample input.
fn category_detection(_client: &Client) {
    let categories = vec![
        "harassment",
        "harassment/threatening",
        "hate",
        "hate/threatening",
        "self-harm",
        "self-harm/intent",
        "self-harm/instructions",
        "sexual",
        "sexual/minors",
        "violence",
        "violence/graphic",
    ];
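    // Category keys as reported by the classic moderation models; newer
    // moderation models may add categories (e.g. "illicit").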

    println!("Available moderation categories:");
    for category in &categories {
        println!("- {}", category);
    }

    let test_content = "Let's have a productive discussion about technology.";
    let result = simulate_moderation(test_content);

    println!("\nAnalyzing: '{}'", test_content);
    println!("Results:");
    for category in categories {
        let flagged = result.categories.get(category).unwrap_or(&false);
        let score = result.scores.get(category).unwrap_or(&0.0);
        println!(
            " {:<25} Flagged: {:5} Score: {:.4}",
            category, flagged, score
        );
    }
}

/// Applies caller-defined score thresholds instead of relying on the API's
/// own per-category flags.
fn custom_thresholds(_client: &Client) {
    let mut custom_thresholds = HashMap::new();
    custom_thresholds.insert("harassment".to_string(), 0.7);
    custom_thresholds.insert("violence".to_string(), 0.8);
    custom_thresholds.insert("sexual".to_string(), 0.5);

    let test_content = "This content needs moderation checking";
    let result = simulate_moderation(test_content);

    println!("Custom threshold evaluation:");
    for (category, threshold) in &custom_thresholds {
        let score = result.scores.get(category).unwrap_or(&0.0);
        let flagged = score >= threshold;

        println!(
            "Category: {:<15} Score: {:.3} Threshold: {:.1} -> {}",
            category,
            score,
            threshold,
            if flagged { "FLAGGED" } else { "OK" }
        );
    }
}

/// Runs the same check on inputs in several languages; the Moderations API
/// accepts non-English text, though accuracy can vary by language.
fn multilingual_moderation(_client: &Client) {
    let multilingual_tests = vec![
        ("English", "This is a test message"),
        ("Spanish", "Este es un mensaje de prueba"),
        ("French", "Ceci est un message de test"),
        ("German", "Dies ist eine Testnachricht"),
        ("Japanese", "これはテストメッセージです"),
    ];

    for (language, content) in multilingual_tests {
        println!("{}: '{}'", language, content);

        let result = simulate_moderation(content);
        println!(" Flagged: {}", result.flagged);
    }
}

/// Moderates a list of items in fixed-size chunks, as an application might
/// when batching many inputs.
fn batch_moderation(_client: &Client) {
    let batch_content = [
        "First message to check",
        "Second message to check",
        "Third message to check",
        "Fourth message to check",
        "Fifth message to check",
    ];

    println!("Batch moderation of {} items:", batch_content.len());

    let chunk_size = 3;
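    // The OpenAI moderation endpoint also accepts an array of inputs per
    // request; chunking like this stands in for that kind of batching
    // (whether openai-ergonomic exposes array input is not shown here).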
    for (i, chunk) in batch_content.chunks(chunk_size).enumerate() {
        println!("\nChunk {} ({} items):", i + 1, chunk.len());

        for content in chunk {
            let result = simulate_moderation(content);
            println!(
                " '{}...' -> {}",
                // Byte slicing is safe here because the inputs are ASCII.
                &content[..20.min(content.len())],
                if result.flagged { "FLAGGED" } else { "OK" }
            );
        }
    }
}

/// Generates a chat completion, moderates the output, and regenerates with a
/// stricter system prompt if the response is flagged.
async fn response_filtering(client: &Client) -> Result<()> {
    println!("Generating and moderating AI responses:");

    let prompt = "Tell me about technology";
    let builder = client.chat().user(prompt).max_completion_tokens(100);
    let response = client.send_chat(builder).await?;

    if let Some(content) = response.content() {
        println!("Generated response: '{}'", content);

        let moderation_result = simulate_moderation(content);

        if moderation_result.flagged {
            println!(
                "⚠️ Response flagged! Categories: {:?}",
                moderation_result.categories
            );
            println!("Action: Response blocked or regenerated");

            let safe_builder = client
                .chat()
                .system("Provide helpful, safe, and appropriate responses only.")
                .user(prompt)
                .max_completion_tokens(100);
            let safe_response = client.send_chat(safe_builder).await?;

            if let Some(safe_content) = safe_response.content() {
                println!("Regenerated safe response: '{}'", safe_content);
            }
        } else {
            println!("✓ Response passed moderation");
        }
    }

    Ok(())
}

/// Evaluates simulated moderation results against a `ModerationPolicy` and
/// reports the resulting action for each test case.
fn policy_enforcement(_client: &Client) {
    let policy = ModerationPolicy {
        thresholds: HashMap::from([
            ("harassment".to_string(), 0.5),
            ("violence".to_string(), 0.6),
            ("sexual".to_string(), 0.4),
        ]),
        auto_reject_categories: vec![
            "harassment/threatening".to_string(),
            "violence/graphic".to_string(),
        ],
        require_human_review: vec!["self-harm".to_string()],
    };

    let test_cases = vec![
        "Normal conversation about work",
        "Slightly aggressive language here",
        "Content requiring review",
    ];

    for content in test_cases {
        println!("Checking: '{}'", content);

        let result = simulate_moderation(content);
        let action = apply_policy(&result, &policy);

        match action {
            PolicyAction::Approve => println!(" ✓ Approved"),
            PolicyAction::Reject(reason) => println!(" ✗ Rejected: {}", reason),
            PolicyAction::Review(reason) => println!(" ⚠ Human review needed: {}", reason),
        }
    }
}

/// End-to-end pipeline: cheap local pre-filters, API moderation of the input,
/// response generation, post-filters, and moderation of the output.
async fn moderation_pipeline(client: &Client) -> Result<()> {
    type FilterFn = Box<dyn Fn(&str) -> bool + Send + Sync>;

    struct ModerationPipeline {
        pre_filters: Vec<FilterFn>,
        post_filters: Vec<FilterFn>,
    }

    let pipeline = ModerationPipeline {
        pre_filters: vec![
            Box::new(|text| text.len() < 10_000), // reject overly long input
            Box::new(|text| !text.is_empty()),    // reject empty input
        ],
        post_filters: vec![
            Box::new(|text| !text.contains("blockedword")), // simple denylist
        ],
    };

    println!("Running moderation pipeline:");

    let user_input = "Please help me with this technical question about Rust programming.";

    println!("1. Pre-filters:");
    for (i, filter) in pipeline.pre_filters.iter().enumerate() {
        if filter(user_input) {
            println!(" ✓ Pre-filter {} passed", i + 1);
        } else {
            println!(" ✗ Pre-filter {} failed", i + 1);
            return Ok(());
        }
    }

    println!("2. API moderation:");
    let moderation_result = simulate_moderation(user_input);
    if moderation_result.flagged {
        println!(" ✗ Content flagged by API");
        return Ok(());
    }
    println!(" ✓ Passed API moderation");

    println!("3. Generating response:");
    let builder = client.chat().user(user_input).max_completion_tokens(50);
    let response = client.send_chat(builder).await?;

    if let Some(content) = response.content() {
        println!(" Generated: '{}'", content);

        println!("4. Post-filters:");
        for (i, filter) in pipeline.post_filters.iter().enumerate() {
            if filter(content) {
                println!(" ✓ Post-filter {} passed", i + 1);
            } else {
                println!(" ✗ Post-filter {} failed", i + 1);
                return Ok(());
            }
        }

        println!("5. Response moderation:");
        let response_moderation = simulate_moderation(content);
        if response_moderation.flagged {
            println!(" ✗ Response flagged");
        } else {
            println!(" ✓ Response approved");
            println!("\nFinal output: '{}'", content);
        }
    }

    Ok(())
}

/// Local stand-in for the Moderations API: derives fake category scores from
/// simple keyword checks so the example runs without network access.
fn simulate_moderation(content: &str) -> ModerationResult {
    let mut categories = HashMap::new();
    let mut scores = HashMap::new();

    let harassment_score = if content.contains("hate") { 0.8 } else { 0.1 };
    let violence_score = if content.contains("aggressive") {
        0.6
    } else {
        0.05
    };

    categories.insert("harassment".to_string(), harassment_score > 0.5);
    categories.insert("violence".to_string(), violence_score > 0.5);
    categories.insert("sexual".to_string(), false);

    scores.insert("harassment".to_string(), harassment_score);
    scores.insert("violence".to_string(), violence_score);
    scores.insert("sexual".to_string(), 0.01);

    ModerationResult {
        flagged: harassment_score > 0.5 || violence_score > 0.5,
        categories,
        scores,
    }
}

/// Outcome of applying a `ModerationPolicy` to a moderation result.
enum PolicyAction {
    Approve,
    Reject(String),
    Review(String),
}

/// Applies a policy to a moderation result. Precedence: auto-reject
/// categories first, then human-review categories, then score thresholds.
fn apply_policy(result: &ModerationResult, policy: &ModerationPolicy) -> PolicyAction {
    for category in &policy.auto_reject_categories {
        if *result.categories.get(category).unwrap_or(&false) {
            return PolicyAction::Reject(format!("Auto-rejected: {}", category));
        }
    }

    for category in &policy.require_human_review {
        if *result.categories.get(category).unwrap_or(&false) {
            return PolicyAction::Review(format!("Review needed: {}", category));
        }
    }

    for (category, threshold) in &policy.thresholds {
        if let Some(score) = result.scores.get(category) {
            if score > threshold {
                return PolicyAction::Reject(format!(
                    "{} score ({:.2}) exceeds threshold ({:.2})",
                    category, score, threshold
                ));
            }
        }
    }

    PolicyAction::Approve
}