async_openai/types/moderations/moderation.rs
use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

#[derive(Debug, Serialize, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum ModerationInput {
    /// A single string of text to classify for moderation
    String(String),

    /// An array of strings to classify for moderation
    StringArray(Vec<String>),

    /// An array of multi-modal inputs to the moderation model
    MultiModal(Vec<ModerationContentPart>),
}
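
// A quick illustrative check of the JSON shapes the untagged `ModerationInput`
// enum produces. The `serde_json` calls assume it is available as elsewhere in
// the crate; the assertions are a sketch, not normative API behavior.
#[cfg(test)]
mod moderation_input_shapes {
    use super::*;

    #[test]
    fn untagged_variants_serialize_to_distinct_json_shapes() {
        // A single string serializes as a bare JSON string...
        let single = ModerationInput::String("hello".into());
        assert_eq!(serde_json::to_string(&single).unwrap(), r#""hello""#);

        // ...while an array of strings serializes as a JSON array.
        let many = ModerationInput::StringArray(vec!["a".into(), "b".into()]);
        assert_eq!(serde_json::to_string(&many).unwrap(), r#"["a","b"]"#);
    }
}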

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ModerationTextInput {
    /// A string of text to classify
    pub text: String,
}

#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ModerationImageURLInput {
    /// Either a URL of the image or the base64 encoded image data.
    pub image_url: String,
}

/// Content part for multi-modal moderation input
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
pub enum ModerationContentPart {
    /// An object describing text to classify
    #[serde(rename = "text")]
    Text(ModerationTextInput),

    /// An object describing an image to classify
    #[serde(rename = "image_url")]
    ImageUrl(ModerationImageURLInput),
}
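
// A sketch of the internally tagged wire format that `#[serde(tag = "type")]`
// gives `ModerationContentPart`; the exact JSON strings assume serde's default
// ordering (tag first) and are illustrative.
#[cfg(test)]
mod content_part_shapes {
    use super::*;

    #[test]
    fn content_parts_carry_a_type_tag() {
        let text = ModerationContentPart::Text(ModerationTextInput {
            text: "hello".into(),
        });
        assert_eq!(
            serde_json::to_string(&text).unwrap(),
            r#"{"type":"text","text":"hello"}"#
        );

        let image = ModerationContentPart::ImageUrl(ModerationImageURLInput {
            image_url: "https://example.com/image.png".into(),
        });
        assert_eq!(
            serde_json::to_string(&image).unwrap(),
            r#"{"type":"image_url","image_url":"https://example.com/image.png"}"#
        );
    }
}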

#[derive(Debug, Default, Clone, Serialize, Builder, PartialEq, Deserialize)]
#[builder(name = "CreateModerationRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateModerationRequest {
    /// Input (or inputs) to classify. Can be a single string, an array of strings, or
    /// an array of multi-modal input objects similar to other models.
    pub input: ModerationInput,

    /// The content moderation model you would like to use. Learn more in
    /// [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
    /// available models [here](https://platform.openai.com/docs/models#moderation).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
}
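
// A hedged usage sketch for the derive_builder-generated
// `CreateModerationRequestArgs`: setters accept `impl Into<_>` because of
// `setter(into, strip_option)`, and `build()` returns `Result<_, OpenAIError>`
// per `build_fn(error = "OpenAIError")`. The model name is a placeholder.
#[cfg(test)]
mod request_builder_usage {
    use super::*;

    #[test]
    fn builder_assembles_a_request() {
        let request = CreateModerationRequestArgs::default()
            .input(ModerationInput::String("some text to classify".into()))
            .model("omni-moderation-latest")
            .build()
            .expect("every field has a default, so build should succeed");

        assert_eq!(request.model.as_deref(), Some("omni-moderation-latest"));
    }
}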

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct Categories {
    /// Content that expresses, incites, or promotes hate based on race, gender,
    /// ethnicity, religion, nationality, sexual orientation, disability status, or
    /// caste. Hateful content aimed at non-protected groups (e.g., chess players)
    /// is harassment.
    pub hate: bool,
    /// Hateful content that also includes violence or serious harm towards the
    /// targeted group based on race, gender, ethnicity, religion, nationality,
    /// sexual orientation, disability status, or caste.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,
    /// Content that expresses, incites, or promotes harassing language towards any target.
    pub harassment: bool,
    /// Harassment content that also includes violence or serious harm towards any target.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,
    /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.
    pub illicit: bool,
    /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: bool,
    /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm")]
    pub self_harm: bool,
    /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,
    /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,
    /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
    pub sexual: bool,
    /// Sexual content that includes an individual who is under 18 years old.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,
    /// Content that depicts death, violence, or physical injury.
    pub violence: bool,
    /// Content that depicts death, violence, or physical injury in graphic detail.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
}
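
// The slashed wire names above are easy to get wrong by hand, so here is a
// small illustrative check that e.g. "hate/threatening" lands in
// `hate_threatening`; the JSON payload is fabricated test data.
#[cfg(test)]
mod categories_wire_names {
    use super::*;

    #[test]
    fn slashed_names_map_to_snake_case_fields() {
        let json = r#"{
            "hate": false, "hate/threatening": true,
            "harassment": true, "harassment/threatening": false,
            "illicit": false, "illicit/violent": false,
            "self-harm": false, "self-harm/intent": false,
            "self-harm/instructions": false,
            "sexual": false, "sexual/minors": false,
            "violence": false, "violence/graphic": false
        }"#;
        let categories: Categories = serde_json::from_str(json).unwrap();
        assert!(categories.hate_threatening);
        assert!(categories.harassment);
        assert!(!categories.violence_graphic);
    }
}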

/// A list of the categories along with their scores as predicted by the model.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CategoryScore {
    /// The score for the category 'hate'.
    pub hate: f32,
    /// The score for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f32,
    /// The score for the category 'harassment'.
    pub harassment: f32,
    /// The score for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f32,
    /// The score for the category 'illicit'.
    pub illicit: f32,
    /// The score for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: f32,
    /// The score for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: f32,
    /// The score for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f32,
    /// The score for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f32,
    /// The score for the category 'sexual'.
    pub sexual: f32,
    /// The score for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f32,
    /// The score for the category 'violence'.
    pub violence: f32,
    /// The score for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f32,
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct ContentModerationResult {
    /// Whether any of the categories below are flagged.
    pub flagged: bool,
    /// A list of the categories, and whether they are flagged or not.
    pub categories: Categories,
    /// A list of the categories along with their scores as predicted by the model.
    pub category_scores: CategoryScore,
    /// A list of the categories along with the input type(s) that the score applies to.
    pub category_applied_input_types: CategoryAppliedInputTypes,
}

/// Represents whether a given input is potentially harmful.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CreateModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,
    /// The model used to generate the moderation results.
    pub model: String,
    /// A list of moderation objects, one per input.
    pub results: Vec<ContentModerationResult>,
}
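
// A hedged sketch of how a caller might consume the response types above;
// `any_flagged` is a hypothetical helper, not part of this module's public API.
#[cfg(test)]
mod response_consumption {
    use super::*;

    // True if any result in the response was flagged.
    fn any_flagged(response: &CreateModerationResponse) -> bool {
        response.results.iter().any(|result| result.flagged)
    }

    #[test]
    fn empty_result_list_is_not_flagged() {
        let response = CreateModerationResponse {
            id: "modr-123".into(),
            model: "omni-moderation-latest".into(),
            results: vec![],
        };
        assert!(!any_flagged(&response));
    }
}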

/// A list of the categories along with the input type(s) that the score applies to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CategoryAppliedInputTypes {
    /// The applied input type(s) for the category 'hate'.
    pub hate: Vec<ModInputType>,

    /// The applied input type(s) for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment'.
    pub harassment: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit'.
    pub illicit: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual'.
    pub sexual: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence'.
    pub violence: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: Vec<ModInputType>,
}

/// The type of input that was moderated
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ModInputType {
    /// Text content that was moderated
    Text,
    /// Image content that was moderated
    Image,
}
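
// `rename_all = "lowercase"` means these variants travel as "text" and "image"
// on the wire; a quick illustrative check (serde_json assumed, as above).
#[cfg(test)]
mod mod_input_type_wire_format {
    use super::*;

    #[test]
    fn variants_serialize_lowercase() {
        assert_eq!(serde_json::to_string(&ModInputType::Text).unwrap(), r#""text""#);
        assert_eq!(serde_json::to_string(&ModInputType::Image).unwrap(), r#""image""#);
    }
}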