// async_openai_wasm/types/moderation.rs

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

#[derive(Debug, Serialize, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum ModerationInput {
    /// A single string of text to classify for moderation
    String(String),

    /// An array of strings to classify for moderation
    StringArray(Vec<String>),

    /// An array of multi-modal inputs to the moderation model
    MultiModal(Vec<ModerationContentPart>),
}
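
// A minimal shape check (not in the upstream file): because `ModerationInput`
// is `#[serde(untagged)]`, each variant serializes directly as its payload: a
// bare JSON string, an array of strings, or an array of content parts.
// Assumes `serde_json` is available as a dev-dependency.
#[cfg(test)]
mod moderation_input_shape {
    use super::*;

    #[test]
    fn untagged_variants_serialize_as_their_payloads() {
        let single = ModerationInput::String("hello".into());
        assert_eq!(serde_json::to_string(&single).unwrap(), r#""hello""#);

        let batch = ModerationInput::StringArray(vec!["a".into(), "b".into()]);
        assert_eq!(serde_json::to_string(&batch).unwrap(), r#"["a","b"]"#);
    }
}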

/// Content part for multi-modal moderation input
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
pub enum ModerationContentPart {
    /// An object describing text to classify
    #[serde(rename = "text")]
    Text {
        /// A string of text to classify
        text: String,
    },

    /// An object describing an image to classify
    #[serde(rename = "image_url")]
    ImageUrl {
        /// Contains either an image URL or a data URL for a base64 encoded image
        image_url: ModerationImageUrl,
    },
}

/// Image URL configuration for image moderation
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ModerationImageUrl {
    /// Either a URL of the image or the base64 encoded image data
    pub url: String,
}
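
// A minimal shape check (not in the upstream file): `#[serde(tag = "type")]`
// makes `ModerationContentPart` internally tagged, matching the API's
// `{"type": "image_url", ...}` objects. Assumes `serde_json` as a dev-dependency.
#[cfg(test)]
mod content_part_shape {
    use super::*;

    #[test]
    fn content_parts_are_internally_tagged() {
        let part = ModerationContentPart::ImageUrl {
            image_url: ModerationImageUrl {
                url: "https://example.com/cat.png".into(),
            },
        };
        assert_eq!(
            serde_json::to_string(&part).unwrap(),
            r#"{"type":"image_url","image_url":{"url":"https://example.com/cat.png"}}"#
        );
    }
}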

#[derive(Debug, Default, Clone, Serialize, Builder, PartialEq, Deserialize)]
#[builder(name = "CreateModerationRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateModerationRequest {
    /// Input (or inputs) to classify. Can be a single string, an array of strings, or
    /// an array of multi-modal input objects similar to other models.
    pub input: ModerationInput,

    /// The content moderation model you would like to use. Learn more in the
    /// [moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
    /// available models [here](https://platform.openai.com/docs/models/moderation).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
}
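
// A usage sketch (not in the upstream file): the derive_builder attributes above
// generate `CreateModerationRequestArgs` with `impl Into` setters and a `build()`
// that returns `Result<_, OpenAIError>`. The model ID below is only an example;
// see the moderation guide for current model names.
#[allow(dead_code)]
fn example_build_request() -> Result<CreateModerationRequest, OpenAIError> {
    CreateModerationRequestArgs::default()
        .input(ModerationInput::String("text to classify".into()))
        .model("omni-moderation-latest")
        .build()
}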

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct Category {
    /// Content that expresses, incites, or promotes hate based on race, gender,
    /// ethnicity, religion, nationality, sexual orientation, disability status, or
    /// caste. Hateful content aimed at non-protected groups (e.g., chess players)
    /// is harassment.
    pub hate: bool,
    /// Hateful content that also includes violence or serious harm towards the
    /// targeted group based on race, gender, ethnicity, religion, nationality,
    /// sexual orientation, disability status, or caste.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,
    /// Content that expresses, incites, or promotes harassing language towards any target.
    pub harassment: bool,
    /// Harassment content that also includes violence or serious harm towards any target.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,
    /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing, or that gives advice or instruction on how to commit illicit acts. For example, "how to shoplift" would fit this category.
    pub illicit: bool,
    /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing that also includes violence, or that gives advice or instruction on the procurement of any weapon.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: bool,
    /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm")]
    pub self_harm: bool,
    /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,
    /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,
    /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
    pub sexual: bool,
    /// Sexual content that includes an individual who is under 18 years old.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,
    /// Content that depicts death, violence, or physical injury.
    pub violence: bool,
    /// Content that depicts death, violence, or physical injury in graphic detail.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
}

/// A list of the categories along with their scores as predicted by the model.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CategoryScore {
    /// The score for the category 'hate'.
    pub hate: f32,
    /// The score for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f32,
    /// The score for the category 'harassment'.
    pub harassment: f32,
    /// The score for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f32,
    /// The score for the category 'illicit'.
    pub illicit: f32,
    /// The score for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: f32,
    /// The score for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: f32,
    /// The score for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f32,
    /// The score for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f32,
    /// The score for the category 'sexual'.
    pub sexual: f32,
    /// The score for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f32,
    /// The score for the category 'violence'.
    pub violence: f32,
    /// The score for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f32,
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct ContentModerationResult {
    /// Whether any of the categories below are flagged.
    pub flagged: bool,
    /// A list of the categories, and whether they are flagged or not.
    pub categories: Category,
    /// A list of the categories along with their scores as predicted by the model.
    pub category_scores: CategoryScore,
    /// A list of the categories along with the input type(s) that the score applies to.
    pub category_applied_input_types: CategoryAppliedInputTypes,
}

/// Represents whether a given text input is potentially harmful.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CreateModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,
    /// The model used to generate the moderation results.
    pub model: String,
    /// A list of moderation objects.
    pub results: Vec<ContentModerationResult>,
}
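
// A small consumer sketch (not in the upstream file; the helper name is
// hypothetical): each entry in `results` corresponds to one input, so scanning
// for `flagged` entries yields the indices of inputs that tripped moderation.
#[allow(dead_code)]
fn flagged_indices(response: &CreateModerationResponse) -> Vec<usize> {
    response
        .results
        .iter()
        .enumerate()
        .filter(|(_, result)| result.flagged)
        .map(|(index, _)| index)
        .collect()
}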

/// A list of the categories along with the input type(s) that the score applies to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CategoryAppliedInputTypes {
    /// The applied input type(s) for the category 'hate'.
    pub hate: Vec<ModInputType>,

    /// The applied input type(s) for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment'.
    pub harassment: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit'.
    pub illicit: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual'.
    pub sexual: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence'.
    pub violence: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: Vec<ModInputType>,
}

/// The type of input that was moderated
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ModInputType {
    /// Text content that was moderated
    Text,
    /// Image content that was moderated
    Image,
}
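
// A minimal shape check (not in the upstream file): `rename_all = "lowercase"`
// maps the variants to the API's `"text"` and `"image"` strings. Assumes
// `serde_json` as a dev-dependency.
#[cfg(test)]
mod mod_input_type_shape {
    use super::*;

    #[test]
    fn input_types_serialize_lowercase() {
        assert_eq!(serde_json::to_string(&ModInputType::Text).unwrap(), r#""text""#);
        assert_eq!(serde_json::to_string(&ModInputType::Image).unwrap(), r#""image""#);
    }
}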