use serde::{ Serialize, Deserialize };
use core::time::Duration;
use std::collections::HashMap;
/// Safety settings applied to content processing.
///
/// Constructed via `SafetyConfiguration::new()` and refined through the
/// `with_*` builder methods; sanity-checked by `validate_safety_configuration`.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub struct SafetyConfiguration
{
/// Master switch for content filtering.
pub content_filtering_enabled : bool,
/// Strictness of harm prevention.
pub harm_prevention_level : HarmPreventionLevel,
/// Content types explicitly permitted. Must not overlap `blocked_content_types`.
pub allowed_content_types : Vec< ContentType >,
/// Content types explicitly rejected. Must not overlap `allowed_content_types`.
pub blocked_content_types : Vec< ContentType >,
/// Optional free-form rules; interpretation is left to the consumer.
pub custom_safety_rules : Option< Vec< String > >,
/// Whether audit records are produced for processed requests.
pub audit_logging_enabled : bool,
/// Compliance regime this configuration operates under.
pub compliance_mode : ComplianceMode,
}
/// Harm-prevention strictness, from `Low` up to `Maximum`.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum HarmPreventionLevel
{
Low,
Medium,
High,
Maximum,
}
/// Broad content categories used in allow/block lists.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ContentType
{
Text,
Educational,
Adult,
Violence,
Medical,
Legal,
Financial,
}
/// Compliance regime a configuration operates under.
/// `Strict` is incompatible with `HarmPreventionLevel::Low` ( see `validate_safety_configuration` ).
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ComplianceMode
{
Standard,
Strict,
Regulatory,
}
/// Outbound request to filter a piece of content ( serialize-only ).
#[ derive( Debug, Clone, Serialize ) ]
pub struct ContentFilterRequest
{
/// Raw content to evaluate.
pub content : String,
/// Safety settings to apply during filtering.
pub safety_config : SafetyConfiguration,
/// Filter categories to run against the content.
pub filter_categories : Vec< FilterCategory >,
/// Minimum severity the caller cares about.
pub severity_threshold : SeverityLevel,
}
/// Result of a content-filtering request.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct ContentFilterResponse
{
/// Overall verdict across all filters.
pub is_safe : bool,
/// Categories whose filters the content passed.
pub passed_filters : Vec< FilterCategory >,
/// Categories whose filters the content failed.
pub failed_filters : Vec< FilterCategory >,
/// Aggregate risk score ( presumably 0.0-1.0 — confirm with the producing service ).
pub risk_score : f64,
/// Action the filter suggests taking.
pub recommended_action : SafetyAction,
/// Per-category detail for each filter that ran.
pub filter_results : Vec< FilterResult >,
/// Audit-trail identifier, present when audit logging is enabled.
pub audit_id : Option< String >,
}
/// Categories a content filter can test for.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum FilterCategory
{
Harassment,
Violence,
Adult,
Hate,
SelfHarm,
Illegal,
}
/// Severity grading, from `Low` up to `Critical`.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum SeverityLevel
{
Low,
Medium,
High,
Critical,
}
/// Action recommended after a safety assessment.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum SafetyAction
{
Allow,
Warn,
Block,
Review,
}
/// Outcome of a single filter category check.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct FilterResult
{
/// Category this result refers to.
pub category : FilterCategory,
/// Whether the content passed this filter.
pub passed : bool,
/// Confidence of the verdict ( presumably 0.0-1.0 — confirm with the producing service ).
pub confidence : f64,
/// Optional human-readable rationale.
pub explanation : Option< String >,
}
/// Outbound request to classify content for harm ( serialize-only ).
#[ derive( Debug, Clone, Serialize ) ]
pub struct HarmClassificationRequest
{
/// Raw content to classify.
pub content : String,
/// Harm types the classifier should test for.
pub classification_categories : Vec< HarmType >,
/// Minimum confidence for a category to be reported; builder clamps to [ 0.0, 1.0 ].
pub confidence_threshold : f64,
/// Whether per-category explanations are requested.
pub include_explanations : bool,
}
/// Result of a harm-classification request.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct HarmClassificationResponse
{
/// Overall verdict across all categories.
pub is_safe : bool,
/// Harm categories detected, with confidence and severity.
pub harm_categories : Vec< HarmCategory >,
/// Aggregate risk score ( presumably 0.0-1.0 — confirm with the producing service ).
pub overall_risk_score : f64,
/// Action the classifier suggests taking.
pub recommended_action : SafetyAction,
/// Identifiers or descriptions of violated policies.
pub policy_violations : Vec< String >,
/// Audit-trail identifier, present when audit logging is enabled.
pub audit_id : Option< String >,
}
/// Harm categories a classifier can detect.
/// Superset of `FilterCategory` ( adds `Misinformation` ).
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum HarmType
{
Violence,
Harassment,
Adult,
Hate,
SelfHarm,
Illegal,
Misinformation,
}
/// A single detected harm category with its assessment.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct HarmCategory
{
/// Which harm type was detected.
pub category : HarmType,
/// Detection confidence ( presumably 0.0-1.0 — confirm with the producing service ).
pub confidence : f64,
/// Graded severity of the detection.
pub severity : SeverityLevel,
/// Human-readable description of the finding.
pub description : String,
}
/// Policy describing how safety violations are enforced and escalated.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct SafetyPolicyEnforcement
{
/// Overall enforcement strictness.
pub enforcement_level : EnforcementLevel,
/// Whether violations are blocked automatically.
pub auto_block_violations : bool,
/// Harm types that always require a human review.
pub require_human_review : Vec< HarmType >,
/// Rules mapping triggers to escalation actions.
pub escalation_rules : Vec< EscalationRule >,
/// Settings for compliance report generation.
pub compliance_reporting : ComplianceReporting,
}
/// Enforcement strictness, from `Permissive` up to `Maximum`.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum EnforcementLevel
{
Permissive,
Standard,
Strict,
Maximum,
}
/// Pairs an escalation trigger with the action to take and who to notify.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct EscalationRule
{
/// Condition that fires this rule.
pub trigger : EscalationTrigger,
/// Action performed when the trigger fires.
pub action : EscalationAction,
/// Channel identifiers to notify ( format defined by the consumer ).
pub notification_channels : Vec< String >,
}
/// Conditions that can fire an escalation rule.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub enum EscalationTrigger
{
/// Fires on a risk score; payload is presumably the threshold — TODO confirm with enforcement logic.
HighRiskScore( f64 ),
/// Fires on repeated violations; payload is presumably ( count, window ) — TODO confirm.
RepeatedViolations( u32, Duration ),
/// Fires when the given harm type is detected.
HarmTypeDetected( HarmType ),
}
/// Actions an escalation rule can perform.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub enum EscalationAction
{
AlertAdministrator,
/// Block for the given duration.
TemporaryBlock( Duration ),
RequireReview,
LogIncident,
}
/// Settings controlling periodic compliance reporting.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct ComplianceReporting
{
/// Master switch for report generation.
pub enabled : bool,
/// How often reports are produced.
pub report_frequency : ReportFrequency,
/// Whether the audit trail is embedded in reports.
pub include_audit_trail : bool,
/// How long report data is retained, in days.
pub retention_period_days : u32,
}
/// How often compliance reports are generated.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ReportFrequency
{
Daily,
Weekly,
Monthly,
Quarterly,
}
/// A single audit-trail record for one processed request.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct ComplianceAuditTrail
{
/// Unique identifier of this audit record.
pub audit_id : String,
/// When the record was created ( string format defined by the producer — TODO confirm ).
pub timestamp : String,
/// Originating user, when known.
pub user_id : Option< String >,
/// Content that was assessed.
pub request_content : String,
/// Outcome of the safety assessment.
pub safety_assessment : SafetyAssessment,
/// Compliance verdict for this request.
pub compliance_status : ComplianceStatus,
/// Whether a human review is still pending.
pub review_required : bool,
/// Free-form key/value metadata.
pub metadata : HashMap< String, String >,
}
/// Summary of one safety assessment, as stored in the audit trail.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct SafetyAssessment
{
/// Aggregate risk score ( presumably 0.0-1.0 — confirm with the producing service ).
pub risk_score : f64,
/// Harm types detected during the assessment.
pub detected_categories : Vec< HarmType >,
/// Identifiers or descriptions of violated policies.
pub policy_violations : Vec< String >,
/// Action that was actually taken.
pub action_taken : SafetyAction,
}
/// Compliance verdict attached to an audit record.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ComplianceStatus
{
Compliant,
Flagged,
Violation,
PendingReview,
}
/// Outbound request for a compliance report ( serialize-only ).
#[ derive( Debug, Clone, Serialize ) ]
pub struct ComplianceReportRequest
{
/// Which kind of report to generate.
pub report_type : ReportType,
/// Start of the reporting window ( string format defined by the service — TODO confirm ).
pub start_date : String,
/// End of the reporting window ( same format as `start_date` ).
pub end_date : String,
/// Whether per-incident detail is included.
pub include_details : bool,
/// Output format of the report.
pub format : ReportFormat,
}
/// Kinds of compliance report that can be requested.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ReportType
{
SafetyViolations,
ContentFiltering,
HarmClassification,
AuditTrail,
Comprehensive,
}
/// Output formats for generated reports.
#[ derive( Debug, Clone, Serialize, Deserialize, PartialEq ) ]
pub enum ReportFormat
{
Json,
Csv,
Pdf,
Html,
}
/// A generated compliance report.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct ComplianceReportResponse
{
/// Unique identifier of this report.
pub report_id : String,
/// Generation timestamp ( string format defined by the producer — TODO confirm ).
pub generated_at : String,
/// Number of requests covered by the report.
pub total_requests : u64,
/// Number of violations found in the window.
pub violations_detected : u64,
/// Violation counts keyed by category name.
pub violation_summary : HashMap< String, u64 >,
/// Report payload, encoded per the requested `ReportFormat`.
pub report_data : String,
/// Optional URL for downloading the rendered report.
pub download_url : Option< String >,
}
/// Snapshot of the safety subsystem's current state.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct SafetyStatus
{
/// Whether safety processing is active.
pub safety_enabled : bool,
/// Active configuration, when one is loaded.
pub current_config : Option< SafetyConfiguration >,
/// Total requests processed so far.
pub requests_processed : u64,
/// Total violations detected so far.
pub violations_detected : u64,
/// When this status was last refreshed ( string format defined by the producer — TODO confirm ).
pub last_updated : String,
}
/// Operational metrics for the safety subsystem.
#[ derive( Debug, Clone, Serialize, Deserialize ) ]
pub struct SafetyPerformanceMetrics
{
/// Total requests processed.
pub total_requests_processed : u64,
/// Mean classification latency, in milliseconds.
pub average_classification_time_ms : f64,
/// Fraction of requests served from cache ( presumably 0.0-1.0 — confirm with the producer ).
pub cache_hit_rate : f64,
/// Estimated false-positive rate.
pub false_positive_rate : f64,
/// Estimated false-negative rate.
pub false_negative_rate : f64,
/// Service uptime as a percentage.
pub uptime_percentage : f64,
}
impl SafetyConfiguration
{
  /// Creates a configuration with conservative defaults : filtering on,
  /// `Medium` harm prevention, Text/Educational allowed, Adult/Violence
  /// blocked, audit logging on, `Standard` compliance mode.
  #[ inline ]
  #[ must_use ]
  pub fn new() -> Self
  {
    Self
    {
      content_filtering_enabled : true,
      harm_prevention_level : HarmPreventionLevel::Medium,
      allowed_content_types : vec![ ContentType::Text, ContentType::Educational ],
      blocked_content_types : vec![ ContentType::Adult, ContentType::Violence ],
      custom_safety_rules : None,
      audit_logging_enabled : true,
      compliance_mode : ComplianceMode::Standard,
    }
  }

  /// Enables or disables content filtering.
  #[ inline ]
  #[ must_use ]
  pub fn with_content_filtering( self, enabled : bool ) -> Self
  {
    Self { content_filtering_enabled : enabled, ..self }
  }

  /// Sets the harm-prevention strictness.
  #[ inline ]
  #[ must_use ]
  pub fn with_harm_prevention_level( self, level : HarmPreventionLevel ) -> Self
  {
    Self { harm_prevention_level : level, ..self }
  }

  /// Replaces the allowed content types.
  #[ inline ]
  #[ must_use ]
  pub fn with_allowed_content_types( self, types : Vec< ContentType > ) -> Self
  {
    Self { allowed_content_types : types, ..self }
  }

  /// Replaces the blocked content types.
  #[ inline ]
  #[ must_use ]
  pub fn with_blocked_content_types( self, types : Vec< ContentType > ) -> Self
  {
    Self { blocked_content_types : types, ..self }
  }

  /// Installs custom safety rules ( wraps them in `Some` ).
  #[ inline ]
  #[ must_use ]
  pub fn with_custom_safety_rules( self, rules : Vec< String > ) -> Self
  {
    Self { custom_safety_rules : Some( rules ), ..self }
  }

  /// Enables or disables audit logging.
  #[ inline ]
  #[ must_use ]
  pub fn with_audit_logging( self, enabled : bool ) -> Self
  {
    Self { audit_logging_enabled : enabled, ..self }
  }

  /// Sets the compliance mode.
  #[ inline ]
  #[ must_use ]
  pub fn with_compliance_mode( self, mode : ComplianceMode ) -> Self
  {
    Self { compliance_mode : mode, ..self }
  }
}
impl Default for SafetyConfiguration
{
#[ inline ]
fn default() -> Self
{
Self::new()
}
}
impl ContentFilterRequest
{
  /// Builds a request over `content` using `safety_config`, pre-populated
  /// with the Adult, Violence, Harassment and Hate filters and a `Medium`
  /// severity threshold.
  #[ inline ]
  #[ must_use ]
  pub fn new( content : String, safety_config : SafetyConfiguration ) -> Self
  {
    let default_categories = vec!
    [
      FilterCategory::Adult,
      FilterCategory::Violence,
      FilterCategory::Harassment,
      FilterCategory::Hate,
    ];
    Self
    {
      content,
      safety_config,
      filter_categories : default_categories,
      severity_threshold : SeverityLevel::Medium,
    }
  }

  /// Replaces the set of filter categories to evaluate.
  #[ inline ]
  #[ must_use ]
  pub fn with_filter_categories( self, categories : Vec< FilterCategory > ) -> Self
  {
    Self { filter_categories : categories, ..self }
  }

  /// Replaces the severity threshold.
  #[ inline ]
  #[ must_use ]
  pub fn with_severity_threshold( self, threshold : SeverityLevel ) -> Self
  {
    Self { severity_threshold : threshold, ..self }
  }
}
impl HarmClassificationRequest
{
  /// Builds a request over `content` that checks every harm type except
  /// `Misinformation`, with a 0.7 confidence threshold and explanations on.
  #[ inline ]
  #[ must_use ]
  pub fn new( content : String ) -> Self
  {
    let default_categories = vec!
    [
      HarmType::Violence,
      HarmType::Harassment,
      HarmType::Adult,
      HarmType::Hate,
      HarmType::SelfHarm,
      HarmType::Illegal,
    ];
    Self
    {
      content,
      classification_categories : default_categories,
      confidence_threshold : 0.7,
      include_explanations : true,
    }
  }

  /// Replaces the list of harm types to classify against.
  #[ inline ]
  #[ must_use ]
  pub fn with_classification_categories( self, categories : Vec< HarmType > ) -> Self
  {
    Self { classification_categories : categories, ..self }
  }

  /// Sets the minimum confidence, clamped into [ 0.0, 1.0 ].
  #[ inline ]
  #[ must_use ]
  pub fn with_confidence_threshold( self, threshold : f64 ) -> Self
  {
    Self { confidence_threshold : threshold.clamp( 0.0, 1.0 ), ..self }
  }

  /// Toggles per-category explanations in the response.
  #[ inline ]
  #[ must_use ]
  pub fn with_explanations( self, include : bool ) -> Self
  {
    Self { include_explanations : include, ..self }
  }
}
/// Checks a `SafetyConfiguration` for internal contradictions.
///
/// # Errors
///
/// Returns `Err` when a content type appears in both the allowed and blocked
/// lists, or when `Low` harm prevention is combined with `Strict` compliance.
#[ inline ]
pub fn validate_safety_configuration( config : &SafetyConfiguration ) -> Result< (), String >
{
  // First allowed type that also appears in the blocked list, if any.
  let conflict = config
  .allowed_content_types
  .iter()
  .find( | t | config.blocked_content_types.contains( t ) );
  if let Some( allowed_type ) = conflict
  {
    return Err( format!( "Content type {allowed_type:?} cannot be both allowed and blocked" ) );
  }
  let low_but_strict = config.harm_prevention_level == HarmPreventionLevel::Low
    && config.compliance_mode == ComplianceMode::Strict;
  if low_but_strict
  {
    return Err( "Low harm prevention level is incompatible with strict compliance mode".to_string() );
  }
  Ok( () )
}