// openai_api_rs/v1/moderation.rs
use serde::{Deserialize, Serialize};

use crate::impl_builder_methods;

5#[derive(Debug, Serialize, Clone)]
6pub struct CreateModerationRequest {
7 pub input: String,
8 #[serde(skip_serializing_if = "Option::is_none")]
9 pub model: Option<String>,
10}
12impl CreateModerationRequest {
13 pub fn new(input: String) -> Self {
14 Self { input, model: None }
15 }
16}
// Generates chainable builder-style setters on `CreateModerationRequest`,
// one per listed field — presumably a `fn model(self, String) -> Self`
// that wraps the value in `Some`; see the `impl_builder_methods!`
// definition elsewhere in this crate for the exact shape.
impl_builder_methods!(
    CreateModerationRequest,
    model: String
);
/// Top-level response from the moderation endpoint.
#[derive(Debug, Deserialize, Serialize)]
pub struct CreateModerationResponse {
    /// Identifier assigned to this moderation request by the API.
    pub id: String,
    /// The moderation model the server actually used.
    pub model: String,
    /// Classification results. NOTE(review): the request here carries a
    /// single input string, so this presumably holds one element —
    /// confirm against the API's multi-input behavior.
    pub results: Vec<ModerationResult>,
}
/// Moderation verdict for one input.
#[derive(Debug, Deserialize, Serialize)]
pub struct ModerationResult {
    /// Per-category boolean verdicts.
    pub categories: ModerationCategories,
    /// Per-category raw confidence scores.
    pub category_scores: ModerationCategoryScores,
    /// Overall flag as reported by the API (not recomputed locally).
    pub flagged: bool,
}
37#[derive(Debug, Deserialize, Serialize)]
38pub struct ModerationCategories {
39 #[serde(rename = "hate")]
40 pub is_hate: bool,
41 #[serde(rename = "hate/threatening")]
42 pub is_hate_threatening: bool,
43 #[serde(rename = "self-harm")]
44 pub is_self_harm: bool,
45 pub sexual: bool,
46 #[serde(rename = "sexual/minors")]
47 pub is_sexual_minors: bool,
48 pub violence: bool,
49 #[serde(rename = "violence/graphic")]
50 pub is_violence_graphic: bool,
51}
53#[derive(Debug, Deserialize, Serialize)]
54pub struct ModerationCategoryScores {
55 #[serde(rename = "hate")]
56 pub hate_score: f64,
57 #[serde(rename = "hate/threatening")]
58 pub hate_threatening_score: f64,
59 #[serde(rename = "self-harm")]
60 pub self_harm_score: f64,
61 pub sexual: f64,
62 #[serde(rename = "sexual/minors")]
63 pub sexual_minors_score: f64,
64 pub violence: f64,
65 #[serde(rename = "violence/graphic")]
66 pub violence_graphic_score: f64,
67}