litellm-rs 0.1.1

A high-performance AI Gateway written in Rust, providing OpenAI-compatible APIs with intelligent routing, load balancing, and enterprise features.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
//! Security and content filtering system
//!
//! This module provides comprehensive security features including PII detection,
//! content filtering, and compliance tools.

use crate::core::models::openai::*;
use crate::utils::error::Result;
use regex::Regex;

use std::collections::HashMap;
use tracing::warn;

/// Content filter for detecting and handling sensitive content.
///
/// Combines several independent checks — PII regexes, keyword-based
/// moderation rules, and a profanity word list — applied in sequence by
/// `filter_text`.
pub struct ContentFilter {
    /// PII detection patterns (regex + replacement strategy), applied first
    pii_patterns: Vec<PIIPattern>,
    /// Content moderation rules, checked after PII replacement
    moderation_rules: Vec<ModerationRule>,
    /// Word-list based profanity filter, applied last
    profanity_filter: ProfanityFilter,
    /// Custom filters
    // NOTE(review): custom_filters is populated (empty) by new() but never
    // consulted by filter_text() in this file — confirm intended usage.
    custom_filters: Vec<CustomFilter>,
}

/// PII (Personally Identifiable Information) pattern.
///
/// Pairs a compiled regex with the strategy used to rewrite its matches.
#[derive(Debug, Clone)]
pub struct PIIPattern {
    /// Pattern name (e.g. "SSN", "Email"); embedded in reported issue types
    pub name: String,
    /// Compiled regex that locates the PII in free text
    pub pattern: Regex,
    /// Replacement strategy applied to every match of `pattern`
    pub replacement: PIIReplacement,
    /// Confidence score attached to issues raised by this pattern
    pub confidence: f64,
}

/// PII replacement strategies
#[derive(Debug, Clone)]
pub enum PIIReplacement {
    /// Redact each match with a fixed "***"
    Redact,
    /// Replace each match with the given placeholder string
    Placeholder(String),
    /// Hash the value
    Hash,
    /// Remove the match entirely (replace with empty string)
    Remove,
    /// Mask partially (keep first/last N characters, star out the middle)
    PartialMask {
        /// Number of characters to keep at start
        keep_start: usize,
        /// Number of characters to keep at end
        keep_end: usize,
    },
}

/// Content moderation rule
#[derive(Debug, Clone)]
pub struct ModerationRule {
    /// Human-readable rule name
    pub name: String,
    /// Category of content this rule detects
    pub rule_type: ModerationType,
    /// Action to take when the rule fires
    pub action: ModerationAction,
    /// Severity level reported with the resulting issue
    pub severity: ModerationSeverity,
}

/// Types of content moderation
#[derive(Debug, Clone)]
pub enum ModerationType {
    /// Hate speech detection
    HateSpeech,
    /// Violence detection
    Violence,
    /// Sexual content detection
    Sexual,
    /// Self-harm detection
    SelfHarm,
    /// Harassment detection
    Harassment,
    /// Illegal activity detection
    IllegalActivity,
    /// Custom category identified by name
    Custom(String),
}

/// Actions to take when content is flagged
#[derive(Debug, Clone)]
pub enum ModerationAction {
    /// Block the request
    Block,
    /// Warn (log a warning) but allow the request
    Warn,
    /// Log for later review
    Log,
    /// Modify content
    Modify,
    /// Require human review
    HumanReview,
}

/// Severity levels for moderation.
///
/// `PartialOrd` is derived, so declaration order defines the ordering:
/// Low < Medium < High < Critical.
#[derive(Debug, Clone, PartialEq, PartialOrd)]
pub enum ModerationSeverity {
    /// Low severity
    Low,
    /// Medium severity
    Medium,
    /// High severity
    High,
    /// Critical severity
    Critical,
}

/// Profanity filter based on a simple blocked-word list.
#[derive(Debug, Clone)]
pub struct ProfanityFilter {
    /// Blocked words list (matching lowercases the input, so entries
    /// should be stored lowercase)
    blocked_words: Vec<String>,
    /// Character used to mask out blocked words
    replacement_char: char,
    /// Whether to use fuzzy matching
    // NOTE(review): stored but not read by contains_profanity()/filter()
    // in this file — confirm whether fuzzy matching is pending or dead.
    fuzzy_matching: bool,
}

/// Custom content filter
#[derive(Debug, Clone)]
pub struct CustomFilter {
    /// Filter name
    pub name: String,
    /// Regex pattern the filter matches against content
    pub pattern: Regex,
    /// Action taken when the pattern matches
    pub action: ModerationAction,
}

/// Content filtering result
#[derive(Debug, Clone)]
pub struct FilterResult {
    /// Whether content should be blocked
    pub blocked: bool,
    /// All issues detected during filtering
    pub issues: Vec<ContentIssue>,
    /// Modified (sanitized) content, if the filter changed anything
    pub modified_content: Option<String>,
    /// Aggregate confidence (mean of issue confidences; 1.0 when no issues)
    pub confidence: f64,
}

/// Detected content issue
#[derive(Debug, Clone)]
pub struct ContentIssue {
    /// Issue type tag (e.g. "PII_SSN", "PROFANITY")
    pub issue_type: String,
    /// Human-readable issue description
    pub description: String,
    /// Severity level of the issue
    pub severity: ModerationSeverity,
    /// Byte offsets (start, end) of the match in the scanned text, if known
    pub location: Option<(usize, usize)>,
    /// Confidence score for this individual detection
    pub confidence: f64,
}

/// GDPR compliance tools.
///
/// NOTE(review): these GDPR types are declared but no impl blocks for them
/// appear in this file — confirm they are implemented elsewhere or planned.
pub struct GDPRCompliance {
    /// Data retention policies keyed by data type
    retention_policies: HashMap<String, RetentionPolicy>,
    /// Consent management
    consent_manager: ConsentManager,
    /// Data export tools
    export_tools: DataExportTools,
}

/// Data retention policy
#[derive(Debug, Clone)]
pub struct RetentionPolicy {
    /// Data type this policy applies to
    pub data_type: String,
    /// Retention period in days
    pub retention_days: u32,
    /// Whether expired data is deleted automatically
    pub auto_delete: bool,
    /// Optional anonymization applied instead of / alongside deletion
    pub anonymization: Option<AnonymizationRule>,
}

/// Consent management
#[derive(Debug, Clone)]
pub struct ConsentManager {
    /// User consents keyed by user ID
    consents: HashMap<String, UserConsent>,
}

/// User consent information
#[derive(Debug, Clone)]
pub struct UserConsent {
    /// User ID
    pub user_id: String,
    /// Whether consent was given
    pub consented: bool,
    /// When the consent was recorded (UTC)
    pub timestamp: chrono::DateTime<chrono::Utc>,
    /// Version of the consent text the user agreed to
    pub version: String,
    /// Specific permissions granted
    pub permissions: Vec<String>,
}

/// Data export tools
#[derive(Debug, Clone)]
pub struct DataExportTools {
    /// Supported export formats
    formats: Vec<ExportFormat>,
}

/// Export formats
#[derive(Debug, Clone)]
pub enum ExportFormat {
    /// JSON format
    JSON,
    /// CSV format
    CSV,
    /// XML format
    XML,
    /// PDF format
    PDF,
}

/// Anonymization rule
#[derive(Debug, Clone)]
pub struct AnonymizationRule {
    /// Fields to anonymize
    pub fields: Vec<String>,
    /// Anonymization method applied to those fields
    pub method: AnonymizationMethod,
}

/// Anonymization methods
#[derive(Debug, Clone)]
pub enum AnonymizationMethod {
    /// Replace with random data
    Randomize,
    /// Hash the data
    Hash,
    /// Remove the data
    Remove,
    /// Generalize the data
    Generalize,
}

impl ContentFilter {
    /// Create a new content filter
    pub fn new() -> Self {
        Self {
            pii_patterns: Self::default_pii_patterns(),
            moderation_rules: Self::default_moderation_rules(),
            profanity_filter: ProfanityFilter::new(),
            custom_filters: Vec::new(),
        }
    }

    /// Filter chat completion request
    pub async fn filter_chat_request(
        &self,
        request: &mut ChatCompletionRequest,
    ) -> Result<FilterResult> {
        let mut issues = Vec::new();
        let mut blocked = false;
        let mut modified = false;

        // Check each message
        for message in &mut request.messages {
            if let Some(MessageContent::Text(text)) = &mut message.content {
                let result = self.filter_text(text).await?;

                if result.blocked {
                    blocked = true;
                }

                issues.extend(result.issues);

                if let Some(modified_text) = result.modified_content {
                    *text = modified_text;
                    modified = true;
                }
            }
        }

        let confidence = if issues.is_empty() {
            1.0
        } else {
            issues.iter().map(|i| i.confidence).sum::<f64>() / issues.len() as f64
        };

        Ok(FilterResult {
            blocked,
            issues,
            modified_content: if modified {
                Some("Messages modified".to_string())
            } else {
                None
            },
            confidence,
        })
    }

    /// Filter text content
    pub async fn filter_text(&self, text: &str) -> Result<FilterResult> {
        let mut issues = Vec::new();
        let mut modified_text = text.to_string();
        let mut blocked = false;

        // PII detection
        for pattern in &self.pii_patterns {
            if let Some(captures) = pattern.pattern.captures(text) {
                let issue = ContentIssue {
                    issue_type: format!("PII_{}", pattern.name),
                    description: format!("Detected {} in content", pattern.name),
                    severity: ModerationSeverity::High,
                    location: captures.get(0).map(|m| (m.start(), m.end())),
                    confidence: pattern.confidence,
                };
                issues.push(issue);

                // Apply replacement
                modified_text = self.apply_pii_replacement(&modified_text, pattern)?;
            }
        }

        // Content moderation
        for rule in &self.moderation_rules {
            if self.check_moderation_rule(&modified_text, rule).await? {
                let issue = ContentIssue {
                    issue_type: format!("MODERATION_{:?}", rule.rule_type),
                    description: format!("Content flagged for {:?}", rule.rule_type),
                    severity: rule.severity.clone(),
                    location: None,
                    confidence: 0.8, // Default confidence
                };
                issues.push(issue);

                match rule.action {
                    ModerationAction::Block => blocked = true,
                    ModerationAction::Warn => warn!("Content warning: {:?}", rule.rule_type),
                    _ => {}
                }
            }
        }

        // Profanity filtering
        if self.profanity_filter.contains_profanity(&modified_text) {
            modified_text = self.profanity_filter.filter(&modified_text);
            issues.push(ContentIssue {
                issue_type: "PROFANITY".to_string(),
                description: "Profanity detected and filtered".to_string(),
                severity: ModerationSeverity::Medium,
                location: None,
                confidence: 0.9,
            });
        }

        let confidence = if issues.is_empty() {
            1.0
        } else {
            issues.iter().map(|i| i.confidence).sum::<f64>() / issues.len() as f64
        };

        Ok(FilterResult {
            blocked,
            issues,
            modified_content: if modified_text != text {
                Some(modified_text)
            } else {
                None
            },
            confidence,
        })
    }

    /// Default PII patterns
    fn default_pii_patterns() -> Vec<PIIPattern> {
        vec![
            PIIPattern {
                name: "SSN".to_string(),
                pattern: Regex::new(r"\b\d{3}-\d{2}-\d{4}\b").unwrap(),
                replacement: PIIReplacement::Placeholder("XXX-XX-XXXX".to_string()),
                confidence: 0.95,
            },
            PIIPattern {
                name: "Email".to_string(),
                pattern: Regex::new(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b")
                    .unwrap(),
                replacement: PIIReplacement::PartialMask {
                    keep_start: 2,
                    keep_end: 0,
                },
                confidence: 0.9,
            },
            PIIPattern {
                name: "Phone".to_string(),
                pattern: Regex::new(r"\b\d{3}-\d{3}-\d{4}\b").unwrap(),
                replacement: PIIReplacement::Placeholder("XXX-XXX-XXXX".to_string()),
                confidence: 0.85,
            },
            PIIPattern {
                name: "CreditCard".to_string(),
                pattern: Regex::new(r"\b\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}\b").unwrap(),
                replacement: PIIReplacement::Placeholder("XXXX-XXXX-XXXX-XXXX".to_string()),
                confidence: 0.9,
            },
        ]
    }

    /// Default moderation rules
    fn default_moderation_rules() -> Vec<ModerationRule> {
        vec![
            ModerationRule {
                name: "Hate Speech".to_string(),
                rule_type: ModerationType::HateSpeech,
                action: ModerationAction::Block,
                severity: ModerationSeverity::High,
            },
            ModerationRule {
                name: "Violence".to_string(),
                rule_type: ModerationType::Violence,
                action: ModerationAction::Warn,
                severity: ModerationSeverity::Medium,
            },
        ]
    }

    /// Apply PII replacement
    fn apply_pii_replacement(&self, text: &str, pattern: &PIIPattern) -> Result<String> {
        let result = match &pattern.replacement {
            PIIReplacement::Redact => pattern.pattern.replace_all(text, "***").to_string(),
            PIIReplacement::Placeholder(placeholder) => pattern
                .pattern
                .replace_all(text, placeholder.as_str())
                .to_string(),
            PIIReplacement::Hash => {
                // Simple hash replacement (in production, use proper hashing)
                pattern.pattern.replace_all(text, "[HASHED]").to_string()
            }
            PIIReplacement::Remove => pattern.pattern.replace_all(text, "").to_string(),
            PIIReplacement::PartialMask {
                keep_start,
                keep_end,
            } => {
                // Implement partial masking logic
                pattern
                    .pattern
                    .replace_all(text, |caps: &regex::Captures| {
                        let matched = caps.get(0).unwrap().as_str();
                        let len = matched.len();
                        if len <= keep_start + keep_end {
                            "*".repeat(len)
                        } else {
                            let start = &matched[..*keep_start];
                            let end = if *keep_end > 0 {
                                &matched[len - keep_end..]
                            } else {
                                ""
                            };
                            let middle = "*".repeat(len - keep_start - keep_end);
                            format!("{}{}{}", start, middle, end)
                        }
                    })
                    .to_string()
            }
        };
        Ok(result)
    }

    /// Check moderation rule
    async fn check_moderation_rule(&self, text: &str, rule: &ModerationRule) -> Result<bool> {
        // Simplified moderation check - in production, integrate with external services
        match rule.rule_type {
            ModerationType::HateSpeech => {
                Ok(text.to_lowercase().contains("hate") || text.to_lowercase().contains("racist"))
            }
            ModerationType::Violence => Ok(
                text.to_lowercase().contains("violence") || text.to_lowercase().contains("kill")
            ),
            _ => Ok(false),
        }
    }
}

impl ProfanityFilter {
    /// Create a new profanity filter with the placeholder word list,
    /// '*' as the mask character, and fuzzy matching enabled.
    pub fn new() -> Self {
        Self {
            blocked_words: vec![
                "badword1".to_string(),
                "badword2".to_string(),
                // Add more blocked words
            ],
            replacement_char: '*',
            fuzzy_matching: true,
        }
    }

    /// Check if text contains any blocked word (case-insensitive).
    pub fn contains_profanity(&self, text: &str) -> bool {
        let lower_text = text.to_lowercase();
        self.blocked_words
            .iter()
            .any(|word| lower_text.contains(word))
    }

    /// Mask every occurrence of a blocked word with the replacement
    /// character, one per character of the matched word.
    ///
    /// Matching is now case-insensitive (ASCII), consistent with
    /// `contains_profanity`: previously detection lowercased the input but
    /// replacement was exact-case, so e.g. "BadWord1" was flagged yet never
    /// masked.
    pub fn filter(&self, text: &str) -> String {
        let mut chars: Vec<char> = text.chars().collect();
        // Per-char ASCII lowercasing keeps indices aligned with `chars`
        // (full Unicode lowercasing can change the character count).
        let lower: Vec<char> = chars.iter().map(|c| c.to_ascii_lowercase()).collect();

        for word in &self.blocked_words {
            let needle: Vec<char> = word.to_lowercase().chars().collect();
            if needle.is_empty() {
                continue;
            }
            let mut i = 0;
            while i + needle.len() <= lower.len() {
                if lower[i..i + needle.len()] == needle[..] {
                    for c in &mut chars[i..i + needle.len()] {
                        *c = self.replacement_char;
                    }
                    i += needle.len();
                } else {
                    i += 1;
                }
            }
        }
        chars.into_iter().collect()
    }
}

impl Default for ContentFilter {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// PII in the input must be detected AND removed from the output.
    #[tokio::test]
    async fn test_pii_detection() {
        let filter = ContentFilter::new();
        let text = "My SSN is 123-45-6789 and email is test@example.com";

        let result = filter.filter_text(text).await.unwrap();
        assert!(!result.issues.is_empty());

        let modified = result
            .modified_content
            .expect("content containing PII should be modified");
        // The raw SSN and email must be gone; the SSN placeholder present.
        assert!(!modified.contains("123-45-6789"));
        assert!(modified.contains("XXX-XX-XXXX"));
        assert!(!modified.contains("test@example.com"));
    }

    /// Detection is case-insensitive; masking preserves length.
    #[tokio::test]
    async fn test_profanity_filter() {
        let filter = ProfanityFilter::new();
        assert!(filter.contains_profanity("This contains badword1"));
        assert!(filter.contains_profanity("This contains BADWORD1"));
        assert!(!filter.contains_profanity("This is clean"));

        let filtered = filter.filter("This contains badword1");
        assert!(!filtered.contains("badword1"));
        // "badword1" is 8 chars, masked one-for-one with '*'.
        assert_eq!(filtered, "This contains ********");
    }
}