zai_rs/model/moderation/models.rs

//! Content moderation API models and types.
//!
//! This module provides data structures for content moderation requests and responses,
//! supporting text, image, audio, and video content safety analysis.
//!
//! ## Features
//!
//! - **Multi-format support** - Text, image, audio, and video content moderation
//! - **Risk detection** - Identifies pornographic, violent, and illegal content
//! - **Structured results** - Detailed risk level and type information
//! - **Validation** - Input validation using the validator crate
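//!
//! ## Example
//!
//! A minimal sketch of building a text moderation request. The crate path is
//! assumed from this file's location and may differ under re-exports:
//!
//! ```ignore
//! use zai_rs::model::moderation::models::ModerationRequest;
//!
//! let request = ModerationRequest::new_text("Is this text safe?");
//! request.validate().expect("request should be valid");
//!
//! let body = serde_json::to_string(&request).unwrap();
//! assert_eq!(body, r#"{"model":"moderation","input":"Is this text safe?"}"#);
//! ```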

use serde::{Deserialize, Deserializer, Serialize};
use validator::Validate;

// Helper: accept string or number and always deserialize into Option<String>
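//
// Illustrative input shapes (example values, not captured payloads):
//   "request_id": "abc-123"  -> Some("abc-123")
//   "request_id": 12345      -> Some("12345")
//   "request_id": null       -> None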
fn de_opt_string_from_number_or_string<'de, D>(deserializer: D) -> Result<Option<String>, D::Error>
where
    D: Deserializer<'de>,
{
    let v = serde_json::Value::deserialize(deserializer)?;
    match v {
        serde_json::Value::Null => Ok(None),
        serde_json::Value::String(s) => Ok(Some(s)),
        serde_json::Value::Number(n) => Ok(Some(n.to_string())),
        other => Err(serde::de::Error::custom(format!(
            "expected string or number, got {}",
            other
        ))),
    }
}

/// Content moderation model type.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub enum ModerationModel {
    /// Default moderation model
    #[default]
    #[serde(rename = "moderation")]
    Moderation,
}

/// Moderation input content.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ModerationInput {
    /// Text content for moderation
    Text(String),
    /// Multimedia content with type and URL
    Multimedia(MultimediaInput),
}

/// Multimedia input for content moderation.
#[derive(Debug, Clone, Serialize, Deserialize, Validate)]
pub struct MultimediaInput {
    /// Content type (image, audio, video)
    #[serde(rename = "type")]
    pub content_type: MediaType,
    /// URL to the multimedia content
    #[validate(url)]
    pub url: String,
}

/// Media types supported for moderation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MediaType {
    /// Image content
    #[serde(rename = "image")]
    Image,
    /// Audio content
    #[serde(rename = "audio")]
    Audio,
    /// Video content
    #[serde(rename = "video")]
    Video,
}

/// Content moderation request.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationRequest {
    /// Moderation model to use
    #[serde(default)]
    pub model: ModerationModel,
    /// Content to moderate
    pub input: ModerationInput,
}

impl ModerationRequest {
    /// Create a new moderation request with text content.
    pub fn new_text(text: impl Into<String>) -> Self {
        Self {
            model: ModerationModel::default(),
            input: ModerationInput::Text(text.into()),
        }
    }

    /// Create a new moderation request with multimedia content.
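    ///
    /// A minimal sketch (the URL is an illustrative placeholder):
    ///
    /// ```ignore
    /// let request = ModerationRequest::new_multimedia(
    ///     MediaType::Image,
    ///     "https://example.com/image.png",
    /// );
    /// assert!(request.validate().is_ok());
    /// ```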
    pub fn new_multimedia(content_type: MediaType, url: impl Into<String>) -> Self {
        Self {
            model: ModerationModel::default(),
            input: ModerationInput::Multimedia(MultimediaInput {
                content_type,
                url: url.into(),
            }),
        }
    }

    /// Validates the moderation request parameters.
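    ///
    /// Sketch of the expected behavior (the 2000-byte limit is the one
    /// enforced below):
    ///
    /// ```ignore
    /// assert!(ModerationRequest::new_text("hello").validate().is_ok());
    /// assert!(ModerationRequest::new_text("x".repeat(2001)).validate().is_err());
    /// ```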
    pub fn validate(&self) -> Result<(), validator::ValidationErrors> {
        let mut errors = validator::ValidationErrors::new();

        // Validate text input length (bytes, not characters; multi-byte
        // UTF-8 text hits the limit sooner).
        if let ModerationInput::Text(text) = &self.input {
            if text.len() > 2000 {
                errors.add(
                    "input",
                    validator::ValidationError::new("text_length_exceeded"),
                );
            }
        }

        // Validate that the multimedia URL parses as an absolute URL.
        if let ModerationInput::Multimedia(multimedia) = &self.input {
            if multimedia.url.parse::<url::Url>().is_err() {
                errors.add("input", validator::ValidationError::new("invalid_url"));
            }
        }

        if errors.is_empty() {
            Ok(())
        } else {
            Err(errors)
        }
    }
}

/// Risk level for moderated content.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RiskLevel {
    /// Normal content, no risks detected
    #[serde(rename = "PASS")]
    Pass,
    /// Suspicious content, requires review
    #[serde(rename = "REVIEW")]
    Review,
    /// Violating content, should be rejected
    #[serde(rename = "REJECT")]
    Reject,
}

/// Risk types that can be detected.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RiskType {
    /// Pornographic or adult content
    #[serde(rename = "porn")]
    Porn,
    /// Violent or gory content
    #[serde(rename = "violence")]
    Violence,
    /// Illegal or criminal content
    #[serde(rename = "illegal")]
    Illegal,
    /// Political or sensitive content
    #[serde(rename = "politics")]
    Politics,
    /// Other risk types
    #[serde(rename = "other")]
    Other,
}

/// Moderation result for a single content item.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationResult {
    /// Type of content that was moderated
    pub content_type: String,
    /// Risk level assessment
    pub risk_level: RiskLevel,
    /// List of detected risk types
    #[serde(rename = "risk_type")]
    pub risk_types: Vec<String>,
}

/// Usage statistics for the moderation API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationUsage {
    /// Text moderation usage statistics
    pub moderation_text: ModerationTextUsage,
}

/// Text moderation usage statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationTextUsage {
    /// Number of text moderation calls
    pub call_count: u32,
}

/// Content moderation response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationResponse {
    /// Task ID
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    /// Request creation time (Unix timestamp in seconds)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub created: Option<u64>,
    /// Request identifier (the API may return it as a string or a number).
    ///
    /// `default` is required here: with a custom `deserialize_with` on an
    /// `Option` field, serde would otherwise reject payloads that omit it.
    #[serde(
        default,
        skip_serializing_if = "Option::is_none",
        deserialize_with = "de_opt_string_from_number_or_string"
    )]
    pub request_id: Option<String>,
    /// List of moderation results
    #[serde(skip_serializing_if = "Option::is_none")]
    pub result_list: Option<Vec<ModerationResult>>,
    /// Usage statistics
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<ModerationUsage>,
}
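
// A minimal deserialization sketch: the JSON below mirrors the field names
// declared in this module and is illustrative, not a captured API payload.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn response_accepts_numeric_request_id() {
        let json = r#"{
            "created": 1700000000,
            "request_id": 12345,
            "result_list": [
                {
                    "content_type": "text",
                    "risk_level": "PASS",
                    "risk_type": []
                }
            ]
        }"#;
        let resp: ModerationResponse = serde_json::from_str(json).unwrap();
        // The numeric request_id is normalized to a string by the helper above.
        assert_eq!(resp.request_id.as_deref(), Some("12345"));
        assert!(matches!(
            resp.result_list.as_ref().unwrap()[0].risk_level,
            RiskLevel::Pass
        ));
    }
}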