1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
# Example configuration showing the new JSON structural logging features
# for the Lethe proxy system
# NOTE(review): every section name and key name in this file was stripped
# during extraction (bare "[]" and "= value" lines). The identifiers below
# are reconstructed from the surviving comments and values — verify each
# one against the Lethe configuration schema before relying on this file.
[proxy]
enabled = true

# Logging configuration for debugging and analysis
[logging]
# Log levels: "off", "basic", "detailed", "debug"
level = "detailed"
# Include full request/response payloads in logs (detailed/debug levels only)
include_payloads = true
# Enable automatic redaction of sensitive content
redact_sensitive = true
# Custom regex patterns for redacting sensitive content
redaction_patterns = [
    "sk-[A-Za-z0-9]{48}",              # OpenAI API keys
    "Bearer\\s+[A-Za-z0-9._-]+",       # Bearer tokens
    "x-api-key:\\s*[A-Za-z0-9._-]+",   # Anthropic API keys
    "\"password\":\\s*\"[^\"]*\"",     # Password fields
    "\"api_key\":\\s*\"[^\"]*\"",      # Generic API keys
    "\"secret\":\\s*\"[^\"]*\"",       # Generic secrets
    "\"token\":\\s*\"[^\"]*\""         # Generic tokens
]
# Log destinations: "stdout", "file", "structured"
destination = "structured"
# File path (required if destination = "file")
# file_path = "/var/log/lethe-proxy.jsonl"
# Enable correlation IDs for request tracing
correlation_ids = true
# Log performance metrics (timing, size changes)
performance_metrics = true

[providers.openai]
base_url = "https://api.openai.com"

[providers.anthropic]
base_url = "https://api.anthropic.com"

[auth]
# Auth modes: "inject" or "passthrough"
mode = "inject"

# NOTE(review): placeholder credentials — never commit real API keys;
# prefer environment variables or a secrets manager.
[keys]
openai = "your-openai-key-here"
anthropic = "your-anthropic-key-here"

[transform]
enabled = true
# NOTE(review): key name for this size/limit value is a guess — confirm
max_body_bytes = 2000000
system_prelude = "You are a helpful AI assistant operating through the Lethe proxy."

[routing]
providers = ["openai", "anthropic"]

# Timeouts in milliseconds (names reconstructed — confirm units/keys)
[timeouts]
connect_ms = 5000
request_ms = 60000
# Example logged JSON structure (when level = "detailed" or "debug"):
#
# {
# "timestamp": "2024-01-15T10:30:00Z",
# "level": "INFO",
# "event": "proxy_request_transform",
# "request_id": "550e8400-e29b-41d4-a716-446655440000",
# "provider": "openai",
# "path": "/v1/chat/completions",
# "method": "POST",
# "auth_mode": "inject",
# "transform": {
# "enabled": true,
# "duration_ms": 5,
# "changes": ["system_prelude_added", "user_content_rewritten"],
# "size_change_percent": 15.2
# },
# "pre_transform": {
# "size_bytes": 256,
# "content_type": "application/json",
# "payload": {
# "model": "gpt-4",
# "messages": [{"role": "user", "content": "Hello!"}]
# }
# },
# "post_transform": {
# "size_bytes": 312,
# "payload": {
# "model": "gpt-4",
# "messages": [
# {"role": "system", "content": "You are a helpful AI assistant operating through the Lethe proxy."},
# {"role": "user", "content": "Hello!"}
# ]
# }
# },
# "request_metadata": {
# "method": "POST",
# "path": "/v1/chat/completions",
# "content_type": "application/json",
# "content_length": 256,
# "headers_count": 5
# },
# "performance": {
# "transform_duration_ms": 5,
# "total_request_duration_ms": null,
# "pre_transform_size_bytes": 256,
# "post_transform_size_bytes": 312,
# "size_change_percent": 15.2
# }
# }
# Additional logging events:
#
# proxy_response - Response metadata and timing
# proxy_error - Request processing errors with context
# proxy_debug - Debug information (debug level only)