//! `portkey_sdk/model/moderations.rs` — request and response types for the moderation endpoint.

1use serde::{Deserialize, Serialize};
2
/// Request to create a moderation.
///
/// # Example
///
/// ```rust,ignore
/// use portkey::model::CreateModerationRequest;
///
/// let request = CreateModerationRequest {
///     input: "I want to hurt someone".into(),
///     ..Default::default()
/// };
/// ```
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct CreateModerationRequest {
    /// The input text to classify. Either a single string or an array of
    /// strings (see [`ModerationInput`]); `From` impls below allow `.into()`
    /// from `&str`, `String`, or `Vec<String>`.
    pub input: ModerationInput,

    /// The content moderation model you would like to use.
    /// `Default::default()` sets this to `Some("text-moderation-latest")`;
    /// when `None`, the field is omitted from the serialized JSON entirely
    /// (via `skip_serializing_if`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
}
25
26impl Default for CreateModerationRequest {
27    fn default() -> Self {
28        Self {
29            input: ModerationInput::String(String::new()),
30            model: Some("text-moderation-latest".to_string()),
31        }
32    }
33}
34
35/// Input for moderation can be a single string or an array of strings.
36#[derive(Clone, Debug, Serialize, Deserialize)]
37#[serde(untagged)]
38pub enum ModerationInput {
39    String(String),
40    Array(Vec<String>),
41}
42
43impl From<String> for ModerationInput {
44    fn from(s: String) -> Self {
45        ModerationInput::String(s)
46    }
47}
48
49impl From<&str> for ModerationInput {
50    fn from(s: &str) -> Self {
51        ModerationInput::String(s.to_string())
52    }
53}
54
55impl From<Vec<String>> for ModerationInput {
56    fn from(v: Vec<String>) -> Self {
57        ModerationInput::Array(v)
58    }
59}
60
/// Response from the moderation endpoint.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ModerationResponse {
    /// The unique identifier for the moderation request.
    pub id: String,

    /// The model used for moderation, as reported by the server.
    pub model: String,

    /// A list of moderation objects, one per input item.
    /// NOTE(review): per-input ordering is assumed from API convention — confirm.
    pub results: Vec<ModerationResult>,
}
73
/// A single moderation result.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ModerationResult {
    /// Whether any of the categories below are flagged.
    pub flagged: bool,

    /// Per-category boolean verdicts (flagged or not).
    pub categories: ModerationCategories,

    /// Per-category raw scores as predicted by the model.
    pub category_scores: ModerationCategoryScores,
}
86
/// Categories that were checked in the moderation.
///
/// Rust snake_case field names are mapped to the API's slash-separated
/// category names via `#[serde(rename = "...")]` (e.g. `hate_threatening`
/// serializes as `"hate/threatening"`).
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ModerationCategories {
    /// Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.
    pub hate: bool,

    /// Hateful content that also includes violence or serious harm towards the targeted group.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,

    /// Content that expresses, incites, or promotes harassing language towards any target.
    pub harassment: bool,

    /// Harassment content that also includes violence or serious harm towards any target.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,

    /// Content that promotes, encourages, or depicts acts of self-harm.
    #[serde(rename = "self-harm")]
    pub self_harm: bool,

    /// Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,

    /// Content that encourages performing acts of self-harm.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,

    /// Content meant to arouse sexual excitement.
    pub sexual: bool,

    /// Sexual content that includes an individual who is under 18 years old.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,

    /// Content that depicts death, violence, or physical injury.
    pub violence: bool,

    /// Content that depicts death, violence, or physical injury in graphic detail.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
}
130
/// Scores for each moderation category.
///
/// Field names mirror [`ModerationCategories`], with the same
/// `#[serde(rename = "...")]` mapping to the API's slash-separated names.
/// Higher scores indicate higher model confidence that the input falls into
/// the category. NOTE(review): assumed range is [0.0, 1.0] per the upstream
/// moderation API — confirm against provider docs.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ModerationCategoryScores {
    /// Score for hate content.
    pub hate: f64,

    /// Score for threatening hate content.
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f64,

    /// Score for harassment content.
    pub harassment: f64,

    /// Score for threatening harassment content.
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f64,

    /// Score for self-harm content.
    #[serde(rename = "self-harm")]
    pub self_harm: f64,

    /// Score for self-harm intent content.
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f64,

    /// Score for self-harm instructions content.
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f64,

    /// Score for sexual content.
    pub sexual: f64,

    /// Score for sexual content involving minors.
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f64,

    /// Score for violent content.
    pub violence: f64,

    /// Score for graphic violent content.
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f64,
}