openai_tools/moderations/response.rs

//! OpenAI Moderations API Response Types
//!
//! This module defines the response structures for the OpenAI Moderations API.

use serde::{Deserialize, Serialize};

/// Response structure from the moderation endpoint.
///
/// Contains the results of content moderation analysis.
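///
/// # Example
///
/// A minimal sketch of deserializing a response body, assuming `serde_json`
/// is available; the crate path and the JSON payload are illustrative, not
/// a real API reply:
///
/// ```ignore
/// use openai_tools::moderations::response::ModerationResponse;
///
/// let body = r#"{"id": "modr-123", "model": "omni-moderation-latest", "results": []}"#;
/// let response: ModerationResponse = serde_json::from_str(body).unwrap();
/// assert_eq!(response.model, "omni-moderation-latest");
/// ```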
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationResponse {
    /// Unique identifier for the moderation request
    pub id: String,
    /// The model used for moderation
    pub model: String,
    /// Array of moderation results (one per input)
    pub results: Vec<ModerationResult>,
}

/// Individual moderation result for a single input.
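///
/// # Example
///
/// A sketch of acting on a result, assuming `result` is a `ModerationResult`
/// taken from a parsed `ModerationResponse`:
///
/// ```ignore
/// if result.flagged {
///     if result.categories.violence {
///         println!("violence score: {}", result.category_scores.violence);
///     }
/// }
/// ```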
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationResult {
    /// Whether any category was flagged
    pub flagged: bool,
    /// Category flags indicating which types of content were detected
    pub categories: ModerationCategories,
    /// Confidence scores for each category (0.0 to 1.0)
    pub category_scores: ModerationCategoryScores,
}

/// Category flags for content moderation.
///
/// Each field indicates whether that category of content was detected.
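///
/// # Example
///
/// The `serde` renames map the API's slashed and hyphenated key names onto
/// Rust field names, and the optional newer-model fields deserialize to
/// `None` when the API omits them. A sketch, assuming `categories` is a
/// parsed `ModerationCategories`:
///
/// ```ignore
/// // The JSON key "hate/threatening" populates `hate_threatening`.
/// let threatening = categories.hate_threatening;
/// // `illicit` is only present on newer models; default it when absent.
/// let illicit = categories.illicit.unwrap_or(false);
/// ```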
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationCategories {
    /// Content that expresses, incites, or promotes hate based on identity
    pub hate: bool,
    /// Hateful content that also includes violence or threats
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: bool,
    /// Content that expresses, incites, or promotes harassing language
    pub harassment: bool,
    /// Harassment content that also includes violence or threats
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: bool,
    /// Content that promotes, encourages, or depicts self-harm
    #[serde(rename = "self-harm")]
    pub self_harm: bool,
    /// Content indicating intent to commit self-harm
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: bool,
    /// Content that provides instructions for self-harm
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: bool,
    /// Sexual content
    pub sexual: bool,
    /// Sexual content involving minors
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: bool,
    /// Content that depicts violence
    pub violence: bool,
    /// Violent content that is graphic or gory
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: bool,
    /// Content that facilitates or advises wrongdoing (newer models only)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub illicit: Option<bool>,
    /// Illicit content that also includes violence (newer models only)
    #[serde(rename = "illicit/violent", skip_serializing_if = "Option::is_none")]
    pub illicit_violent: Option<bool>,
}

/// Confidence scores for each moderation category.
///
/// Values range from 0.0 to 1.0, where higher values indicate
/// higher confidence that the content belongs to that category.
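///
/// # Example
///
/// Scores express model confidence rather than a pass/fail verdict, so
/// callers typically apply their own cutoffs. A sketch, assuming `scores` is
/// a parsed `ModerationCategoryScores` and 0.8 is an arbitrary threshold:
///
/// ```ignore
/// let needs_review = scores.self_harm > 0.8
///     || scores.violence_graphic > 0.8
///     || scores.illicit.unwrap_or(0.0) > 0.8;
/// ```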
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModerationCategoryScores {
    /// Score for hate content
    pub hate: f64,
    /// Score for hate/threatening content
    #[serde(rename = "hate/threatening")]
    pub hate_threatening: f64,
    /// Score for harassment content
    pub harassment: f64,
    /// Score for harassment/threatening content
    #[serde(rename = "harassment/threatening")]
    pub harassment_threatening: f64,
    /// Score for self-harm content
    #[serde(rename = "self-harm")]
    pub self_harm: f64,
    /// Score for self-harm/intent content
    #[serde(rename = "self-harm/intent")]
    pub self_harm_intent: f64,
    /// Score for self-harm/instructions content
    #[serde(rename = "self-harm/instructions")]
    pub self_harm_instructions: f64,
    /// Score for sexual content
    pub sexual: f64,
    /// Score for sexual/minors content
    #[serde(rename = "sexual/minors")]
    pub sexual_minors: f64,
    /// Score for violence content
    pub violence: f64,
    /// Score for violence/graphic content
    #[serde(rename = "violence/graphic")]
    pub violence_graphic: f64,
    /// Score for illicit content (newer models only)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub illicit: Option<f64>,
    /// Score for illicit/violent content (newer models only)
    #[serde(rename = "illicit/violent", skip_serializing_if = "Option::is_none")]
    pub illicit_violent: Option<f64>,
}
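
// A minimal round-trip sketch (assumes `serde_json` is available as a
// dev-dependency); it exercises the `rename` and `skip_serializing_if`
// attributes on `ModerationCategories`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn category_renames_round_trip() {
        let categories = ModerationCategories {
            hate: false,
            hate_threatening: false,
            harassment: false,
            harassment_threatening: false,
            self_harm: false,
            self_harm_intent: false,
            self_harm_instructions: false,
            sexual: false,
            sexual_minors: false,
            violence: true,
            violence_graphic: false,
            illicit: None,
            illicit_violent: None,
        };
        let json = serde_json::to_string(&categories).unwrap();
        // The slashed API key names come from the serde renames above.
        assert!(json.contains("\"hate/threatening\""));
        assert!(json.contains("\"self-harm\""));
        // Optional newer-model fields are skipped entirely when `None`.
        assert!(!json.contains("illicit"));
        // Missing optional fields deserialize back to `None` without error.
        let parsed: ModerationCategories = serde_json::from_str(&json).unwrap();
        assert!(parsed.violence);
    }
}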