async_openai_types/types/moderation.rs

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

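/// Text input(s) to classify. Serialized `untagged`, so a single input is sent
/// as a plain JSON string and a batch is sent as a JSON array of strings.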
#[derive(Debug, Serialize, Clone, PartialEq, Deserialize)]
#[serde(untagged)]
pub enum ModerationInput {
    String(String),
    StringArray(Vec<String>),
}

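/// The available text moderation models; see [`CreateModerationRequest::model`]
/// for the trade-off between `Latest` and `Stable`.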
#[derive(Debug, Serialize, Default, Clone, Copy, PartialEq, Deserialize)]
pub enum TextModerationModel {
    #[default]
    #[serde(rename = "text-moderation-latest")]
    Latest,
    #[serde(rename = "text-moderation-stable")]
    Stable,
}

#[derive(Debug, Default, Clone, Serialize, Builder, PartialEq, Deserialize)]
#[builder(name = "CreateModerationRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateModerationRequest {
    /// The input text to classify.
    pub input: ModerationInput,

    /// Two content moderation models are available: `text-moderation-stable` and `text-moderation-latest`.
    ///
    /// The default is `text-moderation-latest`, which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advance notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<TextModerationModel>,
}

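/// A list of the categories, and whether they are flagged or not.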
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct Category {
    /// Content that expresses, incites, or promotes hate based on race, gender,
    /// ethnicity, religion, nationality, sexual orientation, disability status, or
    /// caste. Hateful content aimed at non-protected groups (e.g., chess players)
    /// is harassment.
    pub hate: bool,
    /// Hateful content that also includes violence or serious harm towards the
    /// targeted group based on race, gender, ethnicity, religion, nationality,
    /// sexual orientation, disability status, or caste.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,
    /// Content that expresses, incites, or promotes harassing language towards any target.
    pub harassment: bool,
    /// Harassment content that also includes violence or serious harm towards any target.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,
    /// Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm")]
    pub self_harm: bool,
    /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,
    /// Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,
    /// Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).
    pub sexual: bool,
    /// Sexual content that includes an individual who is under 18 years old.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,
    /// Content that depicts death, violence, or physical injury.
    pub violence: bool,
    /// Content that depicts death, violence, or physical injury in graphic detail.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
}

/// A list of the categories along with their scores as predicted by the model.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CategoryScore {
    /// The score for the category 'hate'.
    pub hate: f32,
    /// The score for the category 'hate/threatening'.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f32,
    /// The score for the category 'harassment'.
    pub harassment: f32,
    /// The score for the category 'harassment/threatening'.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f32,
    /// The score for the category 'self-harm'.
    #[serde(rename = "self-harm")]
    pub self_harm: f32,
    /// The score for the category 'self-harm/intent'.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f32,
    /// The score for the category 'self-harm/instructions'.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f32,
    /// The score for the category 'sexual'.
    pub sexual: f32,
    /// The score for the category 'sexual/minors'.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f32,
    /// The score for the category 'violence'.
    pub violence: f32,
    /// The score for the category 'violence/graphic'.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f32,
}

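/// The moderation result for a single input: whether it was flagged, together
/// with the per-category flags and scores.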
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct ContentModerationResult {
    /// Whether any of the below categories are flagged.
    pub flagged: bool,
    /// A list of the categories, and whether they are flagged or not.
    pub categories: Category,
    /// A list of the categories along with their scores as predicted by the model.
    pub category_scores: CategoryScore,
}

/// Represents whether a given text input is potentially harmful.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq)]
pub struct CreateModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,
    /// The model used to generate the moderation results.
    pub model: String,
    /// A list of moderation objects.
    pub results: Vec<ContentModerationResult>,
}
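
// Illustrative tests sketching how these types fit together: building a request
// through the generated `CreateModerationRequestArgs` builder and checking the
// untagged serialization of `ModerationInput`. This is a minimal sketch; it
// assumes `serde_json` is available to the crate's tests, and the strings are
// made-up examples, not recorded API traffic.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn builds_request_with_builder() {
        // The builder is created via `default()`; the `into`/`strip_option`
        // setters accept plain values for the `Option<_>` field.
        let request = CreateModerationRequestArgs::default()
            .input(ModerationInput::String("text to classify".to_string()))
            .model(TextModerationModel::Latest)
            .build()
            .unwrap();

        assert_eq!(
            request.input,
            ModerationInput::String("text to classify".to_string())
        );
        assert_eq!(request.model, Some(TextModerationModel::Latest));
    }

    #[test]
    fn moderation_input_serializes_untagged() {
        // A single input serializes as a bare JSON string...
        let single = ModerationInput::String("hello".to_string());
        assert_eq!(serde_json::to_string(&single).unwrap(), r#""hello""#);

        // ...and a batch serializes as a JSON array of strings.
        let batch = ModerationInput::StringArray(vec!["a".to_string(), "b".to_string()]);
        assert_eq!(serde_json::to_string(&batch).unwrap(), r#"["a","b"]"#);
    }
}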