litellm-rs 0.1.1

A high-performance AI Gateway written in Rust, providing OpenAI-compatible APIs with intelligent routing, load balancing, and enterprise features.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
//! Storage layer for the Gateway
//!
//! This module provides data persistence and caching functionality.

/// Database storage module
pub mod database;
/// File storage module
pub mod files;
/// Redis cache module
pub mod redis;
/// Optimized Redis cache module
pub mod redis_optimized;
/// Vector storage module
pub mod vector;

use crate::config::StorageConfig;
use crate::utils::error::{GatewayError, Result};
use std::sync::Arc;
use tracing::{debug, info, warn};

/// Main storage layer that orchestrates all storage backends.
///
/// Cloning is cheap: every backend is held behind an [`Arc`], so a clone
/// shares the same underlying connection pools.
#[derive(Debug, Clone)]
pub struct StorageLayer {
    /// Database connection pool (primary persistent store)
    pub database: Arc<database::Database>,
    /// Redis connection pool (cache, lists, sets, hashes, pub/sub)
    pub redis: Arc<redis::RedisPool>,
    /// File storage backend for blob/file persistence
    pub files: Arc<files::FileStorage>,
    /// Vector database client (optional; `None` when not configured)
    /// Note: Using concrete type instead of trait object for now
    pub vector: Option<Arc<vector::VectorStoreBackend>>,
}

#[allow(dead_code)]
impl StorageLayer {
    /// Create a new storage layer from the given configuration.
    ///
    /// Initializes the database, Redis, and file-storage backends. The vector
    /// backend is currently never configured (see the TODO below).
    ///
    /// # Errors
    ///
    /// Returns an error if the database, file storage, or (when enabled)
    /// Redis fails to initialize. When Redis is disabled a best-effort
    /// connection is still attempted because the `redis` field is not
    /// optional; if both that attempt and the local fallback fail, a
    /// `GatewayError::Config` is returned instead of panicking.
    pub async fn new(config: &StorageConfig) -> Result<Self> {
        info!("Initializing storage layer");

        // Initialize database
        debug!("Connecting to database");
        let database = Arc::new(database::Database::new(&config.database).await?);

        // Initialize Redis. When enabled, a connection failure is fatal;
        // when disabled, we still need *some* pool (the field is not an
        // Option), so we attempt a best-effort connection with a local
        // fallback config.
        let redis = if config.redis.enabled {
            debug!("Connecting to Redis");
            Arc::new(redis::RedisPool::new(&config.redis).await?)
        } else {
            debug!("Redis disabled in config; attempting best-effort connection");
            match redis::RedisPool::new(&config.redis).await {
                Ok(pool) => Arc::new(pool),
                Err(_) => {
                    warn!("Redis connection failed, continuing with fallback pool");
                    // Minimal local Redis config used only as a fallback.
                    let fallback_config = crate::config::models::storage::RedisConfig {
                        url: "redis://localhost:6379".to_string(),
                        enabled: false,
                        max_connections: 1,
                        connection_timeout: 5,
                        cluster: false,
                    };
                    // Propagate a configuration error rather than panicking:
                    // library code should not abort the process for a
                    // recoverable condition.
                    let pool = redis::RedisPool::new(&fallback_config).await.map_err(|e| {
                        GatewayError::Config(format!(
                            "Failed to create fallback Redis pool: {}",
                            e
                        ))
                    })?;
                    Arc::new(pool)
                }
            }
        };

        // Initialize file storage (using default config for now)
        debug!("Initializing file storage");
        let default_file_config = crate::config::models::file_storage::FileStorageConfig::default();
        let files = Arc::new(files::FileStorage::new(&default_file_config).await?);

        // Initialize vector database (optional, using default config for now)
        let vector = None; // TODO: Add vector_db config to StorageConfig

        info!("Storage layer initialized successfully");

        Ok(Self {
            database,
            redis,
            files,
            vector,
        })
    }

    /// Run database migrations.
    pub async fn migrate(&self) -> Result<()> {
        info!("Running database migrations");
        self.database.migrate().await?;
        info!("Database migrations completed");
        Ok(())
    }

    /// Health check for all storage backends.
    ///
    /// Never fails outright: each backend failure is logged and reflected in
    /// the returned [`StorageHealthStatus`]. An unconfigured vector backend
    /// counts as healthy. `overall` is true only when every flag is true.
    pub async fn health_check(&self) -> Result<StorageHealthStatus> {
        let mut status = StorageHealthStatus {
            database: false,
            redis: false,
            files: false,
            vector: false,
            overall: false,
        };

        // Check database health
        match self.database.health_check().await {
            Ok(_) => status.database = true,
            Err(e) => {
                warn!("Database health check failed: {}", e);
            }
        }

        // Check Redis health
        match self.redis.health_check().await {
            Ok(_) => status.redis = true,
            Err(e) => {
                warn!("Redis health check failed: {}", e);
            }
        }

        // Check file storage health
        match self.files.health_check().await {
            Ok(_) => status.files = true,
            Err(e) => {
                warn!("File storage health check failed: {}", e);
            }
        }

        // Check vector database health (if configured)
        if let Some(vector) = &self.vector {
            match vector.health_check().await {
                Ok(_) => status.vector = true,
                Err(e) => {
                    warn!("Vector database health check failed: {}", e);
                }
            }
        } else {
            status.vector = true; // Not configured, so consider it healthy
        }

        // Overall health is true if all configured backends are healthy
        status.overall = status.database && status.redis && status.files && status.vector;

        Ok(status)
    }

    /// Close all connections.
    pub async fn close(&self) -> Result<()> {
        info!("Closing storage connections");

        // Database connections will be closed when Arc is dropped
        // self.database.close().await?;

        // Close Redis connections
        self.redis.close().await?;

        // Close file storage
        self.files.close().await?;

        // Close vector database connections
        if let Some(vector) = &self.vector {
            vector.close().await?;
        }

        info!("Storage connections closed");
        Ok(())
    }

    /// Get database pool
    pub fn db(&self) -> &database::Database {
        &self.database
    }

    /// Get Redis pool
    pub fn redis(&self) -> &redis::RedisPool {
        &self.redis
    }

    /// Get file storage
    pub fn files(&self) -> &files::FileStorage {
        &self.files
    }

    /// Get vector store (if available)
    pub fn vector(&self) -> Option<&vector::VectorStoreBackend> {
        self.vector.as_deref()
    }

    /// Execute a database transaction (PostgreSQL only)
    #[cfg(feature = "postgres")]
    pub async fn transaction<F, R>(&self, f: F) -> Result<R>
    where
        F: for<'c> FnOnce(
            &'c mut sqlx::Transaction<'_, sqlx::Postgres>,
        ) -> std::pin::Pin<
            Box<dyn std::future::Future<Output = Result<R>> + Send + 'c>,
        >,
    {
        self.database.transaction(f).await
    }

    /// Get a Redis connection
    pub async fn redis_conn(&self) -> Result<redis::RedisConnection> {
        self.redis.get_connection().await
    }

    /// Store file and return file ID
    pub async fn store_file(&self, filename: &str, content: &[u8]) -> Result<String> {
        self.files.store(filename, content).await
    }

    /// Retrieve file content
    pub async fn get_file(&self, file_id: &str) -> Result<Vec<u8>> {
        self.files.get(file_id).await
    }

    /// Delete file
    pub async fn delete_file(&self, file_id: &str) -> Result<()> {
        self.files.delete(file_id).await
    }

    /// Store vector embeddings.
    ///
    /// # Errors
    ///
    /// Returns `GatewayError::Config` when no vector backend is configured.
    pub async fn store_embeddings(
        &self,
        id: &str,
        embeddings: &[f32],
        metadata: Option<serde_json::Value>,
    ) -> Result<()> {
        if let Some(vector) = &self.vector {
            vector.store(id, embeddings, metadata).await
        } else {
            Err(GatewayError::Config(
                "Vector database not configured".to_string(),
            ))
        }
    }

    /// Search similar vectors.
    ///
    /// # Errors
    ///
    /// Returns `GatewayError::Config` when no vector backend is configured.
    pub async fn search_similar(
        &self,
        query_vector: &[f32],
        limit: usize,
        threshold: Option<f32>,
    ) -> Result<Vec<vector::SearchResult>> {
        if let Some(vector) = &self.vector {
            vector.search(query_vector, limit, threshold).await
        } else {
            Err(GatewayError::Config(
                "Vector database not configured".to_string(),
            ))
        }
    }

    /// Get a cached value by key
    pub async fn cache_get(&self, key: &str) -> Result<Option<String>> {
        self.redis.get(key).await
    }

    /// Set cache value with optional TTL (seconds)
    pub async fn cache_set(&self, key: &str, value: &str, ttl: Option<u64>) -> Result<()> {
        self.redis.set(key, value, ttl).await
    }

    /// Delete cache key
    pub async fn cache_delete(&self, key: &str) -> Result<()> {
        self.redis.delete(key).await
    }

    /// Check if cache key exists
    pub async fn cache_exists(&self, key: &str) -> Result<bool> {
        self.redis.exists(key).await
    }

    /// Get multiple cached values in one round trip
    pub async fn cache_mget(&self, keys: &[String]) -> Result<Vec<Option<String>>> {
        self.redis.mget(keys).await
    }

    /// Set multiple cache values with optional TTL (seconds)
    pub async fn cache_mset(&self, pairs: &[(String, String)], ttl: Option<u64>) -> Result<()> {
        self.redis.mset(pairs, ttl).await
    }

    /// Push value to list
    pub async fn list_push(&self, key: &str, value: &str) -> Result<()> {
        self.redis.list_push(key, value).await
    }

    /// Pop value from list
    pub async fn list_pop(&self, key: &str) -> Result<Option<String>> {
        self.redis.list_pop(key).await
    }

    /// Get list length
    pub async fn list_length(&self, key: &str) -> Result<usize> {
        self.redis.list_length(key).await
    }

    /// Add member to set
    pub async fn set_add(&self, key: &str, member: &str) -> Result<()> {
        self.redis.set_add(key, member).await
    }

    /// Remove member from set
    pub async fn set_remove(&self, key: &str, member: &str) -> Result<()> {
        self.redis.set_remove(key, member).await
    }

    /// Get all members of set
    pub async fn set_members(&self, key: &str) -> Result<Vec<String>> {
        self.redis.set_members(key).await
    }

    /// Set hash field value
    pub async fn hash_set(&self, key: &str, field: &str, value: &str) -> Result<()> {
        self.redis.hash_set(key, field, value).await
    }

    /// Get hash field value
    pub async fn hash_get(&self, key: &str, field: &str) -> Result<Option<String>> {
        self.redis.hash_get(key, field).await
    }

    /// Delete hash field
    pub async fn hash_delete(&self, key: &str, field: &str) -> Result<()> {
        self.redis.hash_delete(key, field).await
    }

    /// Get all hash fields and values
    pub async fn hash_get_all(
        &self,
        key: &str,
    ) -> Result<std::collections::HashMap<String, String>> {
        self.redis.hash_get_all(key).await
    }

    /// Publish a message on a Redis pub/sub channel
    pub async fn publish(&self, channel: &str, message: &str) -> Result<()> {
        self.redis.publish(channel, message).await
    }

    /// Subscribe to Redis channels for pub/sub messaging
    pub async fn subscribe(&self, channels: &[String]) -> Result<redis::Subscription> {
        self.redis.subscribe(channels).await
    }
}

/// Storage health status reported by [`StorageLayer::health_check`].
///
/// Each flag is `true` when the corresponding backend responded to its
/// health check (or is not configured, in the vector case).
#[derive(Debug, Clone, serde::Serialize)]
pub struct StorageHealthStatus {
    /// Database health status
    pub database: bool,
    /// Redis health status
    pub redis: bool,
    /// File storage health status
    pub files: bool,
    /// Vector storage health status (`true` when unconfigured)
    pub vector: bool,
    /// Overall health status: `true` only if all of the above are `true`
    pub overall: bool,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{DatabaseConfig, RedisConfig};

    /// Building a [`StorageLayer`] requires live backends, so this test only
    /// verifies that a [`StorageConfig`] can be constructed and that its
    /// fields round-trip as expected.
    #[tokio::test]
    async fn test_storage_layer_creation() {
        let database = DatabaseConfig {
            url: "postgresql://localhost:5432/test".to_string(),
            max_connections: 5,
            connection_timeout: 5,
            ssl: false,
            enabled: true,
        };
        let redis = RedisConfig {
            url: "redis://localhost:6379".to_string(),
            enabled: true,
            max_connections: 10,
            connection_timeout: 5,
            cluster: false,
        };
        let config = StorageConfig { database, redis };

        // Connection-level behavior is not exercised here; just confirm the
        // config carries the values we set.
        assert_eq!(config.database.url, "postgresql://localhost:5432/test");
        assert_eq!(config.redis.url, "redis://localhost:6379");
    }
}