quantum_log 0.3.0

High-performance asynchronous logging framework based on tracing ecosystem
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
//!
//! Database sink implementation.
//!
//! This module writes log events to various databases, supporting SQLite, MySQL
//! and PostgreSQL. Connection pooling and batched inserts are used to optimize
//! performance.

use async_trait::async_trait;
use tokio::sync::mpsc;
use tokio::time::{interval, Duration};
use tracing::{debug, error, info, warn};

use crate::config::{DatabaseSinkConfig, DatabaseType};
use crate::core::event::QuantumLogEvent;
use crate::error::QuantumLogError;
use crate::sinks::database::models::{LogBatch, NewQuantumLogEntry};
use crate::sinks::traits::{ExclusiveSink, QuantumSink, SinkError, SinkMetadata, SinkType};

type Result<T> = std::result::Result<T, QuantumLogError>;

#[cfg(feature = "database")]
use diesel::prelude::*;
#[cfg(feature = "database")]
use diesel::r2d2::{ConnectionManager, Pool};

/// Connection-pool type aliases, one per supported backend.
///
/// Each alias is only compiled when both the `database` feature and the
/// matching backend feature are enabled.
#[cfg(all(feature = "database", feature = "sqlite"))]
type SqlitePool = Pool<ConnectionManager<diesel::sqlite::SqliteConnection>>;

#[cfg(all(feature = "database", feature = "mysql"))]
type MysqlPool = Pool<ConnectionManager<diesel::mysql::MysqlConnection>>;

#[cfg(all(feature = "database", feature = "postgres"))]
type PostgresPool = Pool<ConnectionManager<diesel::pg::PgConnection>>;

/// Backend-agnostic wrapper over the per-database r2d2 connection pools.
///
/// Cloning is cheap: r2d2 pools are internally reference-counted handles.
#[cfg(feature = "database")]
#[derive(Clone)]
pub enum DatabasePool {
    #[cfg(feature = "sqlite")]
    Sqlite(SqlitePool),
    #[cfg(feature = "mysql")]
    Mysql(MysqlPool),
    #[cfg(feature = "postgres")]
    Postgres(PostgresPool),
}

/// Database sink.
///
/// Batches log events and writes them to the configured database; supports
/// multiple database backends behind feature flags.
#[derive(Clone)]
pub struct DatabaseSink {
    /// Shared connection pool (only present when the `database` feature is on).
    #[cfg(feature = "database")]
    pool: DatabasePool,
    /// Sink configuration (database type, batch size, pool size, ...).
    config: DatabaseSinkConfig,
    /// Fully qualified table name, including the schema prefix when configured.
    full_table_name: String,
}

impl std::fmt::Debug for DatabaseSink {
    /// Manual `Debug` impl: the connection pool does not implement `Debug`,
    /// so it is shown as an opaque placeholder.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("DatabaseSink");
        dbg.field("config", &self.config);
        dbg.field("full_table_name", &self.full_table_name);
        dbg.field("pool", &"<DatabasePool>");
        dbg.finish()
    }
}

impl DatabaseSink {
    /// Creates a new database sink instance.
    ///
    /// # Arguments
    /// * `config` - database sink configuration
    ///
    /// # Returns
    /// The configured `DatabaseSink`, or an error if the pool or table
    /// creation fails.
    #[cfg(feature = "database")]
    pub async fn new(config: DatabaseSinkConfig) -> Result<Self> {
        let pool = Self::create_connection_pool(&config).await?;

        // Qualify the table name with the schema when one is configured.
        let full_table_name = match config.schema_name {
            Some(ref schema) => format!("{}.{}", schema, config.table_name),
            None => config.table_name.clone(),
        };

        let sink = Self {
            pool,
            config: config.clone(),
            full_table_name,
        };

        // Create the target table up front when auto-creation is enabled.
        if config.auto_create_table {
            sink.create_table_if_not_exists().await?;
        }

        Ok(sink)
    }

    /// Builds the r2d2 connection pool for the configured database backend.
    ///
    /// Pool size and connection timeout come from the sink configuration.
    /// Returns a `DatabaseError` if the pool cannot be built, or if the
    /// configured backend's feature flag is not enabled in this build.
    #[cfg(feature = "database")]
    async fn create_connection_pool(config: &DatabaseSinkConfig) -> Result<DatabasePool> {
        use diesel::r2d2::Pool;

        let timeout_duration = Duration::from_millis(config.connection_timeout_ms);

        match config.db_type {
            #[cfg(feature = "sqlite")]
            DatabaseType::Sqlite => {
                let manager = ConnectionManager::<diesel::sqlite::SqliteConnection>::new(
                    &config.connection_string,
                );
                let pool = Pool::builder()
                    .max_size(config.connection_pool_size)
                    .connection_timeout(timeout_duration)
                    .build(manager)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("SQLite 连接池创建失败: {}", e))
                    })?;
                Ok(DatabasePool::Sqlite(pool))
            }
            #[cfg(feature = "mysql")]
            DatabaseType::Mysql => {
                let manager = ConnectionManager::<diesel::mysql::MysqlConnection>::new(
                    &config.connection_string,
                );
                let pool = Pool::builder()
                    .max_size(config.connection_pool_size)
                    .connection_timeout(timeout_duration)
                    .build(manager)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("MySQL 连接池创建失败: {}", e))
                    })?;
                Ok(DatabasePool::Mysql(pool))
            }
            #[cfg(feature = "postgres")]
            DatabaseType::Postgresql => {
                let manager =
                    ConnectionManager::<diesel::pg::PgConnection>::new(&config.connection_string);
                let pool = Pool::builder()
                    .max_size(config.connection_pool_size)
                    .connection_timeout(timeout_duration)
                    .build(manager)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("PostgreSQL 连接池创建失败: {}", e))
                    })?;
                Ok(DatabasePool::Postgres(pool))
            }
            // Fallback arms: the variant was selected in config but the
            // corresponding backend feature was not compiled in.
            #[cfg(not(feature = "sqlite"))]
            DatabaseType::Sqlite => Err(QuantumLogError::DatabaseError(
                "SQLite support not enabled".to_string(),
            )),
            #[cfg(not(feature = "mysql"))]
            DatabaseType::Mysql => Err(QuantumLogError::DatabaseError(
                "MySQL support not enabled".to_string(),
            )),
            #[cfg(not(feature = "postgres"))]
            DatabaseType::Postgresql => Err(QuantumLogError::DatabaseError(
                "PostgreSQL support not enabled".to_string(),
            )),
        }
    }

    /// Sends a single event to the database immediately (no batching).
    ///
    /// The event is converted into a database row and inserted on the
    /// blocking thread pool. The backpressure strategy parameter is
    /// currently unused by this path.
    #[cfg(feature = "database")]
    pub async fn send_event(
        &self,
        event: QuantumLogEvent,
        _strategy: &crate::config::BackpressureStrategy,
    ) -> Result<()> {
        // Convert the event into a single database row.
        let row = self.convert_event_to_entry(&event).await?;
        let pool = self.pool.clone();

        // Run the blocking diesel insert off the async runtime.
        let join =
            tokio::task::spawn_blocking(move || Self::insert_batch_blocking(pool, vec![row]))
                .await;

        join.map_err(|e| QuantumLogError::DatabaseError(format!("数据库插入任务执行失败: {}", e)))?
    }

    /// Shuts down the database sink.
    ///
    /// The sink is driven by `spawn_task`, which handles flushing and
    /// teardown internally, so there is nothing to do here.
    #[cfg(feature = "database")]
    pub async fn shutdown(self) -> Result<()> {
        Ok(())
    }

    /// Creates the log table if it does not exist, using the backend-specific
    /// DDL from the schema module.
    ///
    /// The DDL statement is executed on the blocking thread pool. Returns a
    /// `ConfigError` when the configured backend's feature is not compiled in,
    /// or a `DatabaseError` when the statement fails.
    #[cfg(feature = "database")]
    async fn create_table_if_not_exists(&self) -> Result<()> {
        use crate::sinks::database::schema::create_table_sql;

        // Pick the CREATE TABLE statement matching the configured backend.
        let sql = match self.config.db_type {
            #[cfg(feature = "sqlite")]
            DatabaseType::Sqlite => create_table_sql::SQLITE_CREATE_TABLE,
            #[cfg(feature = "mysql")]
            DatabaseType::Mysql => create_table_sql::MYSQL_CREATE_TABLE,
            #[cfg(feature = "postgres")]
            DatabaseType::Postgresql => create_table_sql::POSTGRES_CREATE_TABLE,
            #[cfg(not(feature = "sqlite"))]
            DatabaseType::Sqlite => {
                return Err(QuantumLogError::ConfigError(
                    "SQLite support not enabled".to_string(),
                ))
            }
            #[cfg(not(feature = "mysql"))]
            DatabaseType::Mysql => {
                return Err(QuantumLogError::ConfigError(
                    "MySQL support not enabled".to_string(),
                ))
            }
            #[cfg(not(feature = "postgres"))]
            DatabaseType::Postgresql => {
                return Err(QuantumLogError::ConfigError(
                    "PostgreSQL support not enabled".to_string(),
                ))
            }
        };

        // Execute the DDL on the blocking thread pool; diesel connections
        // are synchronous.
        let pool = self.pool.clone();
        tokio::task::spawn_blocking(move || {
            match pool {
                #[cfg(feature = "sqlite")]
                DatabasePool::Sqlite(pool) => {
                    let mut conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("获取 SQLite 连接失败: {}", e))
                    })?;
                    diesel::sql_query(sql).execute(&mut conn).map_err(|e| {
                        QuantumLogError::DatabaseError(format!("SQLite 表创建失败: {}", e))
                    })?;
                }
                #[cfg(feature = "mysql")]
                DatabasePool::Mysql(pool) => {
                    let mut conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("获取 MySQL 连接失败: {}", e))
                    })?;
                    diesel::sql_query(sql).execute(&mut conn).map_err(|e| {
                        QuantumLogError::DatabaseError(format!("MySQL 表创建失败: {}", e))
                    })?;
                }
                #[cfg(feature = "postgres")]
                DatabasePool::Postgres(pool) => {
                    let mut conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("获取 PostgreSQL 连接失败: {}", e))
                    })?;
                    diesel::sql_query(sql).execute(&mut conn).map_err(|e| {
                        QuantumLogError::DatabaseError(format!("PostgreSQL 表创建失败: {}", e))
                    })?;
                }
            }
            Ok::<(), QuantumLogError>(())
        })
        .await
        .map_err(|e| QuantumLogError::DatabaseError(format!("表创建任务执行失败: {}", e)))?
    }

    /// Spawns the background task that drains events into the database.
    ///
    /// Events are accumulated into a batch that is flushed when full, when it
    /// expires, and — importantly — whenever the loop exits, whether due to a
    /// shutdown signal or the event channel closing.
    ///
    /// # Arguments
    /// * `receiver` - channel yielding log events
    /// * `shutdown_signal` - broadcast receiver used to request shutdown
    ///
    /// # Returns
    /// Join handle for the spawned task.
    #[cfg(feature = "database")]
    pub fn spawn_task(
        self,
        mut receiver: mpsc::Receiver<QuantumLogEvent>,
        mut shutdown_signal: tokio::sync::broadcast::Receiver<()>,
    ) -> tokio::task::JoinHandle<Result<()>> {
        tokio::spawn(async move {
            let mut batch = LogBatch::new();
            // Check once per second whether the pending batch should be flushed.
            let mut flush_interval = interval(Duration::from_secs(1));

            info!(
                "数据库 Sink 任务已启动,数据库类型: {:?}",
                self.config.db_type
            );

            loop {
                tokio::select! {
                    // Receive a new log event.
                    event = receiver.recv() => {
                        match event {
                            Some(log_event) => {
                                if let Ok(entry) = self.convert_event_to_entry(&log_event).await {
                                    batch.add_entry(entry);

                                    // Flush as soon as the batch reaches the configured size.
                                    if batch.is_full(self.config.batch_size) {
                                        if let Err(e) = self.flush_batch(&mut batch).await {
                                            error!("批量写入数据库失败: {}", e);
                                            if let Some(diagnostics) = crate::diagnostics::get_diagnostics_instance() {
                                                diagnostics.increment_sink_errors();
                                            }
                                        }
                                    }
                                } else {
                                    warn!("转换日志事件为数据库条目失败");
                                }
                            },
                            None => {
                                debug!("日志事件通道已关闭");
                                break;
                            }
                        }
                    },

                    // Periodic flush for aged or full batches.
                    _ = flush_interval.tick() => {
                        if !batch.is_empty() && (batch.is_expired(5) || batch.is_full(self.config.batch_size)) {
                            if let Err(e) = self.flush_batch(&mut batch).await {
                                error!("定期刷新数据库批次失败: {}", e);
                                if let Some(diagnostics) = crate::diagnostics::get_diagnostics_instance() {
                                    diagnostics.increment_sink_errors();
                                }
                            }
                        }
                    },

                    // Shutdown requested; leftover entries are flushed below.
                    _ = shutdown_signal.recv() => {
                        info!("收到停机信号,正在刷新剩余的数据库批次");
                        break;
                    }
                }
            }

            // Flush whatever is still buffered regardless of how the loop
            // exited. Previously this only happened on the shutdown-signal
            // path, so events still in the batch when the sender side was
            // dropped (channel closed) were silently lost.
            if !batch.is_empty() {
                if let Err(e) = self.flush_batch(&mut batch).await {
                    error!("停机时刷新数据库批次失败: {}", e);
                }
            }

            info!("数据库 Sink 任务已停止");
            Ok(())
        })
    }

    /// Converts a `QuantumLogEvent` into a `NewQuantumLogEntry` database row.
    ///
    /// Optional metadata (source location, module path, MPI rank) is attached
    /// only when present on the event; structured fields are serialized as a
    /// JSON string.
    #[cfg(feature = "database")]
    async fn convert_event_to_entry(&self, event: &QuantumLogEvent) -> Result<NewQuantumLogEntry> {
        let ctx = &event.context;
        let mut entry = NewQuantumLogEntry::new(
            event.timestamp.naive_utc(),
            event.level.to_string(),
            event.target.clone(),
            event.message.clone(),
            // Fall back to 0 when the pid does not fit the column type.
            ctx.pid.try_into().unwrap_or(0),
            ctx.tid.to_string(),
            ctx.hostname.clone().unwrap_or_default(),
            ctx.username.clone().unwrap_or_default(),
        );

        // Optional source-location metadata.
        if let Some(file_path) = event.file.as_ref() {
            entry = entry.with_file_info(Some(file_path.clone()), event.line.map(|l| l as i32));
        }
        if let Some(module_path) = event.module_path.as_ref() {
            entry = entry.with_module_path(Some(module_path.clone()));
        }
        if let Some(rank) = ctx.mpi_rank {
            entry = entry.with_mpi_rank(Some(rank));
        }

        // Note: span_id/span_name do not exist on QuantumLogEvent yet; span
        // info will be attached once the event structure carries it.

        // Serialize structured fields as JSON when present.
        if !event.fields.is_empty() {
            let fields_json = serde_json::to_string(&event.fields)
                .map_err(|e| QuantumLogError::SerializationError { source: e })?;
            entry = entry.with_fields(Some(fields_json));
        }

        Ok(entry)
    }

    /// Flushes the accumulated batch to the database.
    ///
    /// On success the batch is cleared; on failure the entries are kept in
    /// the batch so a later flush can retry them.
    #[cfg(feature = "database")]
    async fn flush_batch(&self, batch: &mut LogBatch) -> Result<()> {
        if batch.is_empty() {
            return Ok(());
        }

        let pending = batch.entries.clone();
        let count = pending.len();
        let pool = self.pool.clone();

        debug!("正在刷新 {} 条日志到数据库", count);

        // Run the blocking diesel insert on the dedicated blocking pool.
        let joined =
            tokio::task::spawn_blocking(move || Self::insert_batch_blocking(pool, pending)).await;

        // The spawned task itself failed (panicked or was cancelled).
        let outcome = match joined {
            Err(join_err) => {
                let error_msg = format!("数据库任务执行失败: {}", join_err);
                error!("{}", error_msg);
                return Err(QuantumLogError::DatabaseError(error_msg));
            }
            Ok(inner) => inner,
        };

        // The insert itself failed; keep the batch for a later retry.
        if let Err(e) = outcome {
            error!("数据库批量插入失败: {}", e);
            return Err(e);
        }

        debug!("成功写入 {} 条日志到数据库", count);
        if let Some(diagnostics) = crate::diagnostics::get_diagnostics_instance() {
            diagnostics.add_events_processed(count as u64);
        }
        batch.clear();
        Ok(())
    }

    /// Performs the batch insert synchronously on the current thread.
    ///
    /// Must be called from a blocking context (e.g. `spawn_blocking`): it
    /// checks a connection out of the pool and runs a synchronous diesel
    /// `insert_into` against the `quantum_logs` table.
    #[cfg(feature = "database")]
    fn insert_batch_blocking(pool: DatabasePool, entries: Vec<NewQuantumLogEntry>) -> Result<()> {
        use crate::sinks::database::schema::quantum_logs;

        match pool {
            #[cfg(feature = "sqlite")]
            DatabasePool::Sqlite(pool) => {
                let mut conn = pool.get().map_err(|e| {
                    QuantumLogError::DatabaseError(format!("获取 SQLite 连接失败: {}", e))
                })?;

                diesel::insert_into(quantum_logs::table)
                    .values(&entries)
                    .execute(&mut conn)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("SQLite 批量插入失败: {}", e))
                    })?;
            }
            #[cfg(feature = "mysql")]
            DatabasePool::Mysql(pool) => {
                let mut conn = pool.get().map_err(|e| {
                    QuantumLogError::DatabaseError(format!("获取 MySQL 连接失败: {}", e))
                })?;

                diesel::insert_into(quantum_logs::table)
                    .values(&entries)
                    .execute(&mut conn)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("MySQL 批量插入失败: {}", e))
                    })?;
            }
            #[cfg(feature = "postgres")]
            DatabasePool::Postgres(pool) => {
                let mut conn = pool.get().map_err(|e| {
                    QuantumLogError::DatabaseError(format!("获取 PostgreSQL 连接失败: {}", e))
                })?;

                diesel::insert_into(quantum_logs::table)
                    .values(&entries)
                    .execute(&mut conn)
                    .map_err(|e| {
                        QuantumLogError::DatabaseError(format!("PostgreSQL 批量插入失败: {}", e))
                    })?;
            }
        }

        Ok(())
    }

    /// Tests database connectivity by checking a connection out of the pool.
    ///
    /// Runs on the blocking thread pool since pool checkout may block.
    /// Succeeds if a connection can be obtained; the connection is dropped
    /// (returned to the pool) immediately.
    #[cfg(feature = "database")]
    pub async fn test_connection(&self) -> Result<()> {
        let pool = self.pool.clone();

        tokio::task::spawn_blocking(move || {
            match pool {
                #[cfg(feature = "sqlite")]
                DatabasePool::Sqlite(pool) => {
                    let _conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("SQLite 连接测试失败: {}", e))
                    })?;
                }
                #[cfg(feature = "mysql")]
                DatabasePool::Mysql(pool) => {
                    let _conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("MySQL 连接测试失败: {}", e))
                    })?;
                }
                #[cfg(feature = "postgres")]
                DatabasePool::Postgres(pool) => {
                    let _conn = pool.get().map_err(|e| {
                        QuantumLogError::DatabaseError(format!("PostgreSQL 连接测试失败: {}", e))
                    })?;
                }
            }
            Ok::<(), QuantumLogError>(())
        })
        .await
        .map_err(|e| QuantumLogError::DatabaseError(format!("连接测试任务执行失败: {}", e)))?
    }
}

// QuantumSink trait implementation.
#[async_trait]
impl QuantumSink for DatabaseSink {
    type Config = DatabaseSinkConfig;
    type Error = SinkError;

    /// Converts the event into a row and inserts it immediately.
    ///
    /// NOTE(review): this duplicates `convert_event_to_entry` because that
    /// helper is gated on `cfg(feature = "database")` while this trait method
    /// must compile either way.
    async fn send_event(&self, event: QuantumLogEvent) -> std::result::Result<(), Self::Error> {
        // Convert QuantumLogEvent into a NewQuantumLogEntry row.
        let mut entry = NewQuantumLogEntry::new(
            event.timestamp.naive_utc(),
            event.level.to_string(),
            event.target.clone(),
            event.message.clone(),
            // Fall back to 0 when the pid does not fit the column type.
            event.context.pid.try_into().unwrap_or(0),
            event.context.tid.to_string(),
            event.context.hostname.clone().unwrap_or_default(),
            event.context.username.clone().unwrap_or_default(),
        );

        // Optional source-location / module / MPI metadata.
        if let Some(ref file_path) = event.file {
            entry = entry.with_file_info(Some(file_path.clone()), event.line.map(|l| l as i32));
        }

        if let Some(ref module_path) = event.module_path {
            entry = entry.with_module_path(Some(module_path.clone()));
        }

        if let Some(mpi_rank) = event.context.mpi_rank {
            entry = entry.with_mpi_rank(Some(mpi_rank));
        }

        // Serialize structured fields as JSON when present.
        if !event.fields.is_empty() {
            let fields_json = serde_json::to_string(&event.fields)
                .map_err(|e| SinkError::Generic(format!("序列化字段失败: {}", e)))?;
            entry = entry.with_fields(Some(fields_json));
        }

        // Single-entry batch for the blocking insert.
        let entries = vec![entry];

        #[cfg(feature = "database")]
        {
            let pool = self.pool.clone();
            let result =
                tokio::task::spawn_blocking(move || Self::insert_batch_blocking(pool, entries))
                    .await;

            match result {
                Ok(Ok(())) => Ok(()),
                Ok(Err(e)) => Err(SinkError::Database(e.to_string())),
                Err(e) => Err(SinkError::Generic(format!("数据库任务执行失败: {}", e))),
            }
        }
        #[cfg(not(feature = "database"))]
        {
            Err(SinkError::Generic("数据库功能未启用".to_string()))
        }
    }

    /// Shuts the sink down; the connection pool closes its connections on drop.
    async fn shutdown(&self) -> std::result::Result<(), Self::Error> {
        debug!("DatabaseSink 正在关闭");
        Ok(())
    }

    /// Healthy when a pool connection can be checked out (always unhealthy
    /// without the `database` feature).
    async fn is_healthy(&self) -> bool {
        #[cfg(feature = "database")]
        {
            self.test_connection().await.is_ok()
        }
        #[cfg(not(feature = "database"))]
        {
            false
        }
    }

    fn name(&self) -> &'static str {
        "database"
    }

    /// One-line human-readable summary of the sink configuration.
    fn stats(&self) -> String {
        format!(
            "DatabaseSink: type={:?}, table={}, batch_size={}, pool_size={}",
            self.config.db_type,
            self.full_table_name,
            self.config.batch_size,
            self.config.connection_pool_size
        )
    }

    /// Metadata describing this sink for registration/introspection.
    fn metadata(&self) -> SinkMetadata {
        SinkMetadata {
            name: "database".to_string(),
            sink_type: SinkType::Exclusive,
            enabled: self.config.enabled,
            description: Some(format!(
                "Database sink writing to {} table '{}'",
                match self.config.db_type {
                    DatabaseType::Sqlite => "SQLite",
                    DatabaseType::Mysql => "MySQL",
                    DatabaseType::Postgresql => "PostgreSQL",
                },
                self.full_table_name
            )),
        }
    }
}

// Mark DatabaseSink as an exclusive sink.
impl ExclusiveSink for DatabaseSink {}

/// Placeholder implementation used when the `database` feature is disabled.
/// Every entry point reports `FeatureNotEnabled` instead of doing real work.
#[cfg(not(feature = "database"))]
impl DatabaseSink {
    // Always fails: the sink cannot be constructed without the feature.
    pub async fn new(_config: DatabaseSinkConfig) -> Result<Self> {
        Err(QuantumLogError::FeatureNotEnabled("database".to_string()))
    }

    // Spawns a task that immediately resolves to the feature-not-enabled error.
    pub fn spawn_task(
        self,
        _receiver: mpsc::Receiver<QuantumLogEvent>,
        _shutdown_signal: tokio::sync::broadcast::Receiver<()>,
    ) -> tokio::task::JoinHandle<Result<()>> {
        tokio::spawn(async { Err(QuantumLogError::FeatureNotEnabled("database".to_string())) })
    }

    // Connection tests always fail without the feature.
    pub async fn test_connection(&self) -> Result<()> {
        Err(QuantumLogError::FeatureNotEnabled("database".to_string()))
    }
}

#[cfg(all(test, feature = "database"))]
mod tests {
    use super::*;
    use crate::config::DatabaseType;
    use tempfile::tempdir;

    /// Creating a SQLite-backed sink against a temp file should succeed and
    /// yield a working connection (auto-creates the table).
    #[tokio::test]
    #[cfg(feature = "sqlite")]
    async fn test_sqlite_database_sink_creation() {
        let temp_dir = tempdir().unwrap();
        let db_path = temp_dir.path().join("test.db");

        let config = DatabaseSinkConfig {
            enabled: true,
            level: Some("INFO".to_string()),
            db_type: DatabaseType::Sqlite,
            connection_string: format!("sqlite://{}", db_path.display()),
            schema_name: None,
            table_name: "quantum_logs".to_string(),
            batch_size: 100,
            connection_pool_size: 5,
            connection_timeout_ms: 5000,
            auto_create_table: true,
        };

        let sink = DatabaseSink::new(config).await;
        assert!(sink.is_ok());

        let sink = sink.unwrap();
        assert!(sink.test_connection().await.is_ok());
    }

    /// Basic LogBatch lifecycle: empty -> add entry -> clear.
    #[tokio::test]
    async fn test_log_batch_operations() {
        let mut batch = LogBatch::new();
        assert!(batch.is_empty());

        let entry = NewQuantumLogEntry::new(
            chrono::Utc::now().naive_utc(),
            "INFO".to_string(),
            "test".to_string(),
            "Test message".to_string(),
            1234,
            "thread-1".to_string(),
            "localhost".to_string(),
            "testuser".to_string(),
        );

        batch.add_entry(entry);
        assert!(!batch.is_empty());
        assert_eq!(batch.len(), 1);

        batch.clear();
        assert!(batch.is_empty());
    }
}