dynamo_async_openai/types/moderation.rs

// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Based on https://github.com/64bit/async-openai/ by Himanshu Neema
// Original Copyright (c) 2022 Himanshu Neema
// Licensed under MIT License (see ATTRIBUTIONS-Rust.md)
//
// Modifications Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
// Licensed under Apache 2.0

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

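/// Text or multi-modal content to classify for moderation.
///
/// Because this enum is `#[serde(untagged)]`, each variant serializes
/// directly as its underlying JSON shape: a bare string, an array of
/// strings, or an array of content-part objects. A minimal sketch of the
/// single-string form (the crate path and example text are illustrative
/// assumptions, not part of this module):
///
/// ```ignore
/// use dynamo_async_openai::types::ModerationInput;
///
/// let input = ModerationInput::String("is this text safe?".into());
/// // An untagged enum serializes as its inner value: a bare JSON string.
/// assert_eq!(
///     serde_json::to_string(&input).unwrap(),
///     r#""is this text safe?""#
/// );
/// ```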
#[derive(Debug, Serialize, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum ModerationInput {
    /// A single string of text to classify for moderation.
    String(String),

    /// An array of strings to classify for moderation.
    StringArray(Vec<String>),

    /// An array of multi-modal inputs to the moderation model.
    MultiModal(Vec<ModerationContentPart>),
}

/// Content part for multi-modal moderation input.
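///
/// Serialization is internally tagged via `#[serde(tag = "type")]`, so each
/// part carries a `"type"` discriminator alongside its payload. A minimal
/// sketch of the resulting wire shapes (values are illustrative assumptions):
///
/// ```ignore
/// use dynamo_async_openai::types::{ModerationContentPart, ModerationImageUrl};
///
/// let text = ModerationContentPart::Text { text: "hello".into() };
/// // serializes to: {"type":"text","text":"hello"}
///
/// let image = ModerationContentPart::ImageUrl {
///     image_url: ModerationImageUrl { url: "https://example.com/cat.png".into() },
/// };
/// // serializes to: {"type":"image_url","image_url":{"url":"https://example.com/cat.png"}}
/// ```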
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(tag = "type")]
pub enum ModerationContentPart {
    /// An object describing text to classify.
    #[serde(rename = "text")]
    Text {
        /// A string of text to classify.
        text: String,
    },

    /// An object describing an image to classify.
    #[serde(rename = "image_url")]
    ImageUrl {
        /// Contains either an image URL or a data URL for a base64 encoded image.
        image_url: ModerationImageUrl,
    },
}

/// Image URL configuration for image moderation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct ModerationImageUrl {
    /// Either a URL of the image or the base64 encoded image data.
    pub url: String,
}

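/// Request to classify text and/or images for moderation.
///
/// The `Builder` derive below generates `CreateModerationRequestArgs`, whose
/// setters accept `impl Into<T>` and wrap optional fields in `Some`
/// automatically. A minimal sketch (the crate path and model name are
/// illustrative assumptions):
///
/// ```ignore
/// use dynamo_async_openai::types::{CreateModerationRequestArgs, ModerationInput};
///
/// let request = CreateModerationRequestArgs::default()
///     .input(ModerationInput::String("is this text safe?".into()))
///     .model("omni-moderation-latest")
///     .build()?;
/// ```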
#[derive(Debug, Default, Clone, Serialize, Builder, PartialEq, Deserialize)]
#[builder(name = "CreateModerationRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateModerationRequest {
    /// Input (or inputs) to classify. Can be a single string, an array of strings, or
    /// an array of multi-modal input objects similar to other models.
    pub input: ModerationInput,

    /// The content moderation model you would like to use. Learn more in the
    /// [moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
    /// available models [here](https://platform.openai.com/docs/models/moderation).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
}

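/// A list of the categories, and whether they are flagged or not.
///
/// On the wire, nested category names use `/` (for example
/// `"hate/threatening"`), which cannot appear in a Rust identifier, so those
/// fields carry `#[serde(rename = "...")]` attributes. A minimal sketch of
/// deserializing the wire shape (all thirteen keys are required; the values
/// are illustrative):
///
/// ```ignore
/// let categories: Category = serde_json::from_str(
///     r#"{
///         "hate": false, "hate/threatening": false,
///         "harassment": false, "harassment/threatening": false,
///         "illicit": false, "illicit/violent": false,
///         "self-harm": false, "self-harm/intent": false,
///         "self-harm/instructions": false,
///         "sexual": false, "sexual/minors": false,
///         "violence": false, "violence/graphic": false
///     }"#,
/// )?;
/// assert!(!categories.hate_threatening);
/// ```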
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct Category {
    /// Content that expresses, incites, or promotes hate based on race, gender,
    /// ethnicity, religion, nationality, sexual orientation, disability status, or
    /// caste. Hateful content aimed at non-protected groups (e.g., chess players)
    /// is harassment.
    pub hate: bool,
    /// Hateful content that also includes violence or serious harm towards the
    /// targeted group based on race, gender, ethnicity, religion, nationality,
    /// sexual orientation, disability status, or caste.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,
    /// Content that expresses, incites, or promotes harassing language towards any target.
    pub harassment: bool,
    /// Harassment content that also includes violence or serious harm towards any target.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,
    /// Content that includes instructions or advice that facilitate the planning or
    /// execution of wrongdoing, or that gives advice or instruction on how to commit
    /// illicit acts. For example, "how to shoplift" would fit this category.
    pub illicit: bool,
    /// Content that includes instructions or advice that facilitate the planning or
    /// execution of wrongdoing that also includes violence, or that gives advice or
    /// instruction on the procurement of any weapon.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: bool,
    /// Content that promotes, encourages, or depicts acts of self-harm, such as
    /// suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm")]
    pub self_harm: bool,
    /// Content where the speaker expresses that they are engaging or intend to engage
    /// in acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,
    /// Content that encourages performing acts of self-harm, such as suicide, cutting,
    /// and eating disorders, or that gives instructions or advice on how to commit
    /// such acts.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,
    /// Content meant to arouse sexual excitement, such as the description of sexual
    /// activity, or that promotes sexual services (excluding sex education and
    /// wellness).
    pub sexual: bool,
    /// Sexual content that includes an individual who is under 18 years old.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,
    /// Content that depicts death, violence, or physical injury.
    pub violence: bool,
    /// Content that depicts death, violence, or physical injury in graphic detail.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
}

/// A list of the categories along with their scores as predicted by the model.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CategoryScore {
    /// The score for the category 'hate'.
    pub hate: f32,
    /// The score for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f32,
    /// The score for the category 'harassment'.
    pub harassment: f32,
    /// The score for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f32,
    /// The score for the category 'illicit'.
    pub illicit: f32,
    /// The score for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: f32,
    /// The score for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: f32,
    /// The score for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f32,
    /// The score for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f32,
    /// The score for the category 'sexual'.
    pub sexual: f32,
    /// The score for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f32,
    /// The score for the category 'violence'.
    pub violence: f32,
    /// The score for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f32,
}

#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct ContentModerationResult {
    /// Whether any of the categories below are flagged.
    pub flagged: bool,
    /// A list of the categories, and whether they are flagged or not.
    pub categories: Category,
    /// A list of the categories along with their scores as predicted by the model.
    pub category_scores: CategoryScore,
    /// A list of the categories along with the input type(s) that the score applies to.
    pub category_applied_input_types: CategoryAppliedInputTypes,
}

/// Represents whether a given input is potentially harmful.
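///
/// A minimal sketch of consuming a response (`body` stands in for the raw
/// JSON returned by the moderations endpoint; an illustrative assumption):
///
/// ```ignore
/// let response: CreateModerationResponse = serde_json::from_str(&body)?;
/// for result in &response.results {
///     if result.flagged {
///         println!("flagged; violence score = {}", result.category_scores.violence);
///     }
/// }
/// ```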
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CreateModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,
    /// The model used to generate the moderation results.
    pub model: String,
    /// A list of moderation objects.
    pub results: Vec<ContentModerationResult>,
}

/// A list of the categories along with the input type(s) that the score applies to.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub struct CategoryAppliedInputTypes {
    /// The applied input type(s) for the category 'hate'.
    pub hate: Vec<ModInputType>,

    /// The applied input type(s) for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment'.
    pub harassment: Vec<ModInputType>,

    /// The applied input type(s) for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit'.
    pub illicit: Vec<ModInputType>,

    /// The applied input type(s) for the category 'illicit/violent'.
    #[serde(rename = "illicit/violent")]
    pub illicit_violent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: Vec<ModInputType>,

    /// The applied input type(s) for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual'.
    pub sexual: Vec<ModInputType>,

    /// The applied input type(s) for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence'.
    pub violence: Vec<ModInputType>,

    /// The applied input type(s) for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: Vec<ModInputType>,
}

/// The type of input that was moderated.
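///
/// `#[serde(rename_all = "lowercase")]` maps the variants to the wire
/// strings `"text"` and `"image"`. A minimal sketch (illustrative):
///
/// ```ignore
/// let kinds: Vec<ModInputType> = serde_json::from_str(r#"["text", "image"]"#)?;
/// assert_eq!(kinds, vec![ModInputType::Text, ModInputType::Image]);
/// ```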
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ModInputType {
    /// Text content that was moderated.
    Text,
    /// Image content that was moderated.
    Image,
}