rs_zero/core/logging/redaction.rs

/// Redaction settings for logs.
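///
/// A minimal construction sketch; the fragment and replacement below are
/// illustrative, and the `use` path is assumed from this file's location
/// in the crate:
///
/// ```
/// # use rs_zero::core::logging::redaction::RedactionConfig;
/// let config = RedactionConfig {
///     sensitive_keys: vec!["session".to_string()],
///     replacement: "<hidden>".to_string(),
/// };
/// assert!(config.sensitive_keys.contains(&"session".to_string()));
/// ```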
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RedactionConfig {
    /// Case-insensitive key fragments treated as sensitive. Keys are
    /// lowercased before matching, so supply lowercase fragments.
    pub sensitive_keys: Vec<String>,
    /// Replacement text.
    pub replacement: String,
}

impl Default for RedactionConfig {
    fn default() -> Self {
        Self {
            sensitive_keys: vec![
                "authorization".to_string(),
                "cookie".to_string(),
                "token".to_string(),
                "password".to_string(),
                "secret".to_string(),
                "api_key".to_string(),
                "apikey".to_string(),
                "key".to_string(),
            ],
            replacement: "[REDACTED]".to_string(),
        }
    }
}

/// Redacts common sensitive patterns from free-form text: `Authorization`
/// headers (with an optional `Bearer` scheme), sensitive `key=value` pairs
/// (including quoted, multi-token values), and space-separated `key value`
/// pairs. Whitespace between surviving tokens is preserved.
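///
/// # Examples
///
/// A sketch of the expected output under the default config; the input is
/// hypothetical, and the `use` path is assumed from this file's location:
///
/// ```
/// use rs_zero::core::logging::redaction::{redact_text, RedactionConfig};
///
/// let config = RedactionConfig::default();
/// assert_eq!(
///     redact_text("password=hunter2 ok", &config),
///     "password=[REDACTED] ok",
/// );
/// ```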
pub fn redact_text(input: &str, config: &RedactionConfig) -> String {
    // `split_inclusive` keeps each token's trailing whitespace so the
    // original spacing can be reproduced in the output.
    let tokens = input
        .split_inclusive(char::is_whitespace)
        .collect::<Vec<_>>();
    let mut output = Vec::with_capacity(tokens.len());
    let mut index = 0;
    while index < tokens.len() {
        let token = tokens[index];
        let word = token.trim();
        let lower = word.to_ascii_lowercase();
        // `Authorization` / `Authorization:` header followed by a credential.
        if lower.trim_end_matches(':') == "authorization" && index + 1 < tokens.len() {
            let skip = skip_authorization_tokens(&tokens[index + 1..]);
            let last = tokens[(index + skip - 1).min(tokens.len() - 1)];
            output.push(format!(
                "{}{}{}",
                token_prefix(token, word),
                format_authorization_key(word),
                token_suffix(token, word)
            ));
            // Reuse the whitespace that followed the last redacted token.
            output.push(format!(
                "{}{}",
                config.replacement,
                token_suffix(last, last.trim())
            ));
            index += skip;
            continue;
        }
        // Sensitive `key=value` pair; the value may be a quoted string that
        // spans several tokens.
        if let Some((key, value)) = word.split_once('=')
            && is_sensitive_key(key, config)
        {
            output.push(format!(
                "{}{key}={}{}",
                token_prefix(token, word),
                config.replacement,
                token_suffix(token, word)
            ));
            index += skip_sensitive_value_tokens(value, &tokens[index + 1..]);
            continue;
        }
        // Space-separated `key value` pair; trim a trailing colon so forms
        // like `key:` still match the exact "key" pattern.
        if is_sensitive_key(lower.trim_end_matches(':'), config) && index + 1 < tokens.len() {
            output.push(token.to_string());
            output.push(format!(
                "{}{}",
                config.replacement,
                token_suffix(tokens[index + 1], tokens[index + 1].trim())
            ));
            index += 2;
            continue;
        }
        output.push(token.to_string());
        index += 1;
    }
    output.concat()
}

/// Counts the tokens consumed by the `Authorization` branch: the key itself
/// plus one credential token, or two when a `Bearer` scheme prefix is present.
fn skip_authorization_tokens(tokens: &[&str]) -> usize {
    let credential_tokens = if tokens
        .first()
        .is_some_and(|value| value.trim().eq_ignore_ascii_case("bearer"))
    {
        2
    } else {
        1
    };
    credential_tokens + 1
}

/// Counts the tokens consumed by a sensitive `key=value` assignment. A value
/// that opens with `"` without closing it spans every token up to and
/// including the one that ends with the closing quote.
fn skip_sensitive_value_tokens(first_value: &str, tokens: &[&str]) -> usize {
    let value = first_value.trim();
    // A lone `"` opens a quoted value rather than closing one.
    if !value.starts_with('"') || (value.len() > 1 && value.ends_with('"')) {
        return 1;
    }
    for (index, token) in tokens.iter().enumerate() {
        if token.trim().ends_with('"') {
            return index + 2;
        }
    }
    tokens.len() + 1
}

/// Returns whether `key` matches any configured sensitive-key fragment,
/// case-insensitively.
pub(crate) fn is_sensitive_key(key: &str, config: &RedactionConfig) -> bool {
    let lower = key.to_ascii_lowercase();
    config
        .sensitive_keys
        .iter()
        .any(|item| matches_sensitive_key(&lower, item))
}

/// Substring match, except for the bare pattern `"key"`, which only matches
/// exactly or as a `_key`/`-key` suffix to avoid false positives such as
/// `keyboard`.
fn matches_sensitive_key(key: &str, pattern: &str) -> bool {
    if pattern == "key" {
        return key == "key" || key.ends_with("_key") || key.ends_with("-key");
    }
    key.contains(pattern)
}

/// Normalizes a bare `Authorization` key to the `Authorization:` form; a key
/// that already ends with a colon is kept verbatim.
fn format_authorization_key(word: &str) -> &str {
    if word.ends_with(':') {
        word
    } else {
        "Authorization:"
    }
}

/// Returns whatever precedes `word` inside `token` (leading whitespace).
fn token_prefix<'a>(token: &'a str, word: &str) -> &'a str {
    token
        .split_once(word)
        .map(|(prefix, _)| prefix)
        .unwrap_or("")
}

/// Returns whatever follows `word` inside `token` (trailing whitespace).
fn token_suffix<'a>(token: &'a str, word: &str) -> &'a str {
    token
        .split_once(word)
        .map(|(_, suffix)| suffix)
        .unwrap_or("")
}
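
// Illustrative tests added alongside this edit, not part of the original
// module: the inputs are hypothetical, and the expected strings assume the
// default `RedactionConfig`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn redacts_bearer_authorization_headers() {
        let config = RedactionConfig::default();
        assert_eq!(
            redact_text("Authorization: Bearer abc.def.ghi", &config),
            "Authorization: [REDACTED]"
        );
    }

    #[test]
    fn redacts_key_value_pairs_and_keeps_surrounding_text() {
        let config = RedactionConfig::default();
        assert_eq!(
            redact_text("user=alice password=hunter2 ok", &config),
            "user=alice password=[REDACTED] ok"
        );
    }

    #[test]
    fn redacts_quoted_multi_token_values() {
        let config = RedactionConfig::default();
        assert_eq!(
            redact_text(r#"secret="one two" end"#, &config),
            "secret=[REDACTED] end"
        );
    }

    #[test]
    fn redacts_space_separated_key_value_pairs() {
        let config = RedactionConfig::default();
        assert_eq!(
            redact_text("token abc123 end", &config),
            "token [REDACTED] end"
        );
    }
}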