// api_openai/components/moderations.rs
1//! Structures related to content moderation results.
2
3/// Define a private namespace for all its items.
4mod private
5{
  // Serde derive macros used by every moderation type in this module.
  use serde::{ Serialize, Deserialize };
8
9 /// Represents the boolean flags for each moderation category.
10 ///
11 /// # Used By
12 /// - `ModerationResult`
13 #[ derive( Debug, Serialize, Deserialize, Clone, PartialEq ) ] // Added Serialize
14 #[ allow( clippy::struct_excessive_bools ) ]
15pub struct ModerationCategories
16 {
17 /// Content that expresses, incites, or promotes hate based on protected characteristics.
18 pub hate : bool,
19 /// Hateful content that also includes violence or serious harm towards the targeted group.
20 #[ serde( rename = "hate/threatening" ) ]
21 pub hate_threatening : bool,
22 /// Content that expresses, incites, or promotes harassing language towards any target.
23 pub harassment : bool,
24 /// Harassment content that also includes violence or serious harm towards any target.
25 #[ serde( rename = "harassment/threatening" ) ]
26 pub harassment_threatening : bool,
27 /// Content that includes instructions or advice that facilitate the planning or execution of wrongdoing.
28 pub illicit : Option< bool >,
29 /// Content that includes instructions or advice for wrongdoing that also includes violence or weapon procurement.
30 #[ serde( rename = "illicit/violent" ) ]
31 pub illicit_violent : Option< bool >,
32 /// Content that promotes, encourages, or depicts acts of self-harm.
33 #[ serde( rename = "self-harm" ) ]
34 pub self_harm : bool,
35 /// Content where the speaker expresses intent to engage in acts of self-harm.
36 #[ serde( rename = "self-harm/intent" ) ]
37 pub self_harm_intent : bool,
38 /// Content that encourages performing acts of self-harm or gives instructions on how to do so.
39 #[ serde( rename = "self-harm/instructions" ) ]
40 pub self_harm_instructions : bool,
41 /// Content meant to arouse sexual excitement or promote sexual services.
42 pub sexual : bool,
43 /// Sexual content that includes an individual who is under 18 years old.
44 #[ serde( rename = "sexual/minors" ) ]
45 pub sexual_minors : bool,
46 /// Content that depicts death, violence, or physical injury.
47 pub violence : bool,
48 /// Content that depicts death, violence, or physical injury in graphic detail.
49 #[ serde( rename = "violence/graphic" ) ]
50 pub violence_graphic : bool,
51 }
52
53 /// Represents the raw scores for each moderation category, as predicted by the model.
54 ///
55 /// # Used By
56 /// - `ModerationResult`
57 #[ derive( Debug, Serialize, Deserialize, Clone, PartialEq ) ] // Added Serialize
58 pub struct ModerationCategoryScores
59 {
60 /// The score for the 'hate' category.
61 pub hate : f64,
62 /// The score for the 'hate/threatening' category.
63 #[ serde( rename = "hate/threatening" ) ]
64 pub hate_threatening : f64,
65 /// The score for the 'harassment' category.
66 pub harassment : f64,
67 /// The score for the 'harassment/threatening' category.
68 #[ serde( rename = "harassment/threatening" ) ]
69 pub harassment_threatening : f64,
70 /// The score for the 'illicit' category.
71 pub illicit : Option< f64 >,
72 /// The score for the 'illicit/violent' category.
73 #[ serde( rename = "illicit/violent" ) ]
74 pub illicit_violent : Option< f64 >,
75 /// The score for the 'self-harm' category.
76 #[ serde( rename = "self-harm" ) ]
77 pub self_harm : f64,
78 /// The score for the 'self-harm/intent' category.
79 #[ serde( rename = "self-harm/intent" ) ]
80 pub self_harm_intent : f64,
81 /// The score for the 'self-harm/instructions' category.
82 #[ serde( rename = "self-harm/instructions" ) ]
83 pub self_harm_instructions : f64,
84 /// The score for the 'sexual' category.
85 pub sexual : f64,
86 /// The score for the 'sexual/minors' category.
87 #[ serde( rename = "sexual/minors" ) ]
88 pub sexual_minors : f64,
89 /// The score for the 'violence' category.
90 pub violence : f64,
91 /// The score for the 'violence/graphic' category.
92 #[ serde( rename = "violence/graphic" ) ]
93 pub violence_graphic : f64,
94 }
95
96 /// Indicates which input types (text, image) contributed to the score for each category.
97 /// Only available for `omni-moderation` models.
98 ///
99 /// # Used By
100 /// - `ModerationResult`
101 #[ derive( Debug, Serialize, Deserialize, Clone, PartialEq ) ] // Added Serialize
102 pub struct ModerationCategoryAppliedInputTypes
103 {
104 /// Input types applied for the 'hate' category.
105 pub hate : Vec< String >,
106 /// Input types applied for the 'hate/threatening' category.
107 #[ serde( rename = "hate/threatening" ) ]
108 pub hate_threatening : Vec< String >,
109 /// Input types applied for the 'harassment' category.
110 pub harassment : Vec< String >,
111 /// Input types applied for the 'harassment/threatening' category.
112 #[ serde( rename = "harassment/threatening" ) ]
113 pub harassment_threatening : Vec< String >,
114 /// Input types applied for the 'illicit' category.
115 pub illicit : Option< Vec< String > >,
116 /// Input types applied for the 'illicit/violent' category.
117 #[ serde( rename = "illicit/violent" ) ]
118 pub illicit_violent : Option< Vec< String > >,
119 /// Input types applied for the 'self-harm' category.
120 #[ serde( rename = "self-harm" ) ]
121 pub self_harm : Vec< String >,
122 /// Input types applied for the 'self-harm/intent' category.
123 #[ serde( rename = "self-harm/intent" ) ]
124 pub self_harm_intent : Vec< String >,
125 /// Input types applied for the 'self-harm/instructions' category.
126 #[ serde( rename = "self-harm/instructions" ) ]
127 pub self_harm_instructions : Vec< String >,
128 /// Input types applied for the 'sexual' category.
129 pub sexual : Vec< String >,
130 /// Input types applied for the 'sexual/minors' category.
131 #[ serde( rename = "sexual/minors" ) ]
132 pub sexual_minors : Vec< String >,
133 /// Input types applied for the 'violence' category.
134 pub violence : Vec< String >,
135 /// Input types applied for the 'violence/graphic' category.
136 #[ serde( rename = "violence/graphic" ) ]
137 pub violence_graphic : Vec< String >,
138 }
139
  /// Contains the moderation analysis results for a single input.
  ///
  /// Aggregates the per-category flags, raw scores, and (for `omni-moderation`
  /// models) the input types each score applies to.
  ///
  /// # Used By
  /// - `CreateModerationResponse`
  #[ derive( Debug, Serialize, Deserialize, Clone, PartialEq ) ]
  pub struct ModerationResult
  {
    /// Whether the content violates `OpenAI`'s usage policies.
    pub flagged : bool,
    /// A list of the categories, and whether they are flagged or not.
    pub categories : ModerationCategories,
    /// A list of the categories along with their scores as predicted by model.
    pub category_scores : ModerationCategoryScores,
    /// A list of the categories along with the input type(s) that the score applies to.
    /// `None` for models that do not report applied input types; omitted from
    /// serialized output in that case.
    #[ serde( skip_serializing_if = "Option::is_none" ) ]
    pub category_applied_input_types : Option< ModerationCategoryAppliedInputTypes >,
  }
157
  /// Represents the response from a moderation request.
  ///
  /// One `ModerationResult` is returned per input item submitted in the request.
  ///
  /// # Used By
  /// - `/moderations` (POST)
  #[ derive( Debug, Serialize, Deserialize, Clone, PartialEq ) ]
  pub struct CreateModerationResponse
  {
    /// The unique identifier for the moderation request.
    pub id : String,
    /// The model used to generate the moderation results.
    pub model : String,
    /// A list of moderation objects, one for each input provided in the request.
    pub results : Vec< ModerationResult >,
  }
172} // end mod private
173
// Re-export the moderation types from the private namespace through the
// crate's `mod_interface!` macro, making them part of this module's exposed API.
crate ::mod_interface!
{
  exposed use
  {
    ModerationCategories,
    ModerationCategoryScores,
    ModerationCategoryAppliedInputTypes,
    ModerationResult,
    CreateModerationResponse
  };
}