hyperi-rustlib 2.6.1

Opinionated Rust framework for high-throughput data pipelines at petabyte scale. Auto-wires config, logging, metrics, tracing, health checks, and graceful shutdown, distilled from years of production infrastructure experience.

// Project:   hyperi-rustlib
// File:      src/kafka_config.rs
// Purpose:   Shared Kafka librdkafka defaults, profiles, and file config loader
// Language:  Rust
//
// License:   FSL-1.1-ALv2
// Copyright: (c) 2026 HYPERI PTY LIMITED

//! Shared Kafka librdkafka configuration profiles, merge helper, and file loader.
//!
//! This module is always available (no feature gate). The core profile constants
//! and merge helper have zero external dependencies. File loading supports
//! `.properties` without any feature gate; YAML and JSON require the
//! `directory-config` and `config` features respectively.
//!
//! ## Loading from Config Git Directory
//!
//! Services store librdkafka settings in their config git directory and load
//! them with [`config_from_file`]:
//!
//! ```rust,ignore
//! use hyperi_rustlib::kafka_config::{config_from_file, merge_with_overrides, CONSUMER_PRODUCTION};
//!
//! let overrides = config_from_file("/config/kafka.properties")?;
//! let rdkafka_config = merge_with_overrides(CONSUMER_PRODUCTION, &overrides);
//! ```

use std::collections::HashMap;
use std::path::Path;

use thiserror::Error;

// ============================================================================
// Error Type
// ============================================================================

/// Error loading librdkafka configuration from a file.
#[derive(Debug, Error)]
pub enum KafkaConfigError {
    /// File does not exist.
    #[error("kafka config file not found: {path}")]
    FileNotFound { path: std::path::PathBuf },

    /// File extension is not supported (or feature is not enabled).
    #[error("unsupported kafka config format: {ext}. Supported: .properties, .yaml, .yml, .json")]
    UnsupportedFormat { ext: String },

    /// File content could not be parsed.
    #[error("parse error in {path}: {message}")]
    ParseError { path: String, message: String },

    /// I/O error reading the file.
    #[error("io error reading kafka config: {0}")]
    Io(#[from] std::io::Error),
}

/// Result type for kafka config file operations.
pub type KafkaConfigResult<T> = Result<T, KafkaConfigError>;

// ============================================================================
// File Loading
// ============================================================================

/// Load librdkafka configuration from a file in the config git directory.
///
/// Detects format from file extension:
///
/// | Extension | Format | Requires |
/// |-----------|--------|---------|
/// | `.properties` | Java-style `key=value` | nothing (always available) |
/// | `.yaml`, `.yml` | YAML flat mapping | `directory-config` feature |
/// | `.json` | JSON object | `config` feature |
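///
/// The same settings expressed in `.properties` and YAML form (a sketch;
/// YAML values that would otherwise parse as numbers or booleans may need
/// quoting to deserialize into a string map):
///
/// ```text
/// # kafka.properties
/// bootstrap.servers=kafka1:9092,kafka2:9092
/// compression.type=zstd
/// ```
///
/// ```yaml
/// # kafka.yaml
/// bootstrap.servers: kafka1:9092,kafka2:9092
/// compression.type: zstd
/// ```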
///
/// The returned map can be passed directly to [`merge_with_overrides`] or
/// used as `librdkafka_overrides` in `KafkaConfig`.
///
/// # Errors
///
/// Returns [`KafkaConfigError`] if the file is missing, the format is
/// unsupported, or parsing fails.
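///
/// # Examples
///
/// A sketch of handling the missing-file case distinctly (the path shown is
/// hypothetical):
///
/// ```no_run
/// use hyperi_rustlib::kafka_config::{config_from_file, KafkaConfigError};
///
/// match config_from_file("/config/kafka.properties") {
///     Ok(config) => println!("loaded {} librdkafka settings", config.len()),
///     Err(KafkaConfigError::FileNotFound { path }) => {
///         eprintln!("no kafka config at {}", path.display());
///     }
///     Err(e) => eprintln!("kafka config error: {e}"),
/// }
/// ```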
pub fn config_from_file(path: impl AsRef<Path>) -> KafkaConfigResult<HashMap<String, String>> {
    let path = path.as_ref();

    let content = std::fs::read_to_string(path).map_err(|e| {
        if e.kind() == std::io::ErrorKind::NotFound {
            KafkaConfigError::FileNotFound {
                path: path.to_path_buf(),
            }
        } else {
            KafkaConfigError::Io(e)
        }
    })?;

    let ext = path
        .extension()
        .and_then(|s| s.to_str())
        .unwrap_or("")
        .to_lowercase();

    let path_str = path.display().to_string();

    match ext.as_str() {
        "properties" => Ok(config_from_properties_str(&content)),
        "yaml" | "yml" => parse_yaml(&content, path_str),
        "json" => parse_json(&content, path_str),
        other => Err(KafkaConfigError::UnsupportedFormat {
            ext: other.to_string(),
        }),
    }
}

/// Parse Java-style `.properties` content into a librdkafka config map.
///
/// Handles:
/// - `key=value` pairs (splits on first `=` only, so values may contain `=`)
/// - `#` and `!` comments
/// - Empty lines and surrounding whitespace
///
/// Always available with no feature gate.
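///
/// # Examples
///
/// ```
/// use hyperi_rustlib::kafka_config::config_from_properties_str;
///
/// let config = config_from_properties_str("# comment\nbootstrap.servers=kafka:9092\n");
/// assert_eq!(config["bootstrap.servers"], "kafka:9092");
/// assert_eq!(config.len(), 1);
/// ```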
#[must_use]
pub fn config_from_properties_str(content: &str) -> HashMap<String, String> {
    let mut config = HashMap::new();

    for line in content.lines() {
        let line = line.trim();
        if line.is_empty() || line.starts_with('#') || line.starts_with('!') {
            continue;
        }
        if let Some((key, value)) = line.split_once('=') {
            config.insert(key.trim().to_string(), value.trim().to_string());
        }
    }

    config
}

fn parse_yaml(content: &str, path: String) -> KafkaConfigResult<HashMap<String, String>> {
    #[cfg(feature = "directory-config")]
    {
        serde_yaml_ng::from_str(content).map_err(|e| KafkaConfigError::ParseError {
            path,
            message: e.to_string(),
        })
    }
    #[cfg(not(feature = "directory-config"))]
    {
        let _ = (content, path);
        Err(KafkaConfigError::UnsupportedFormat {
            ext: "yaml — enable the `directory-config` feature".to_string(),
        })
    }
}

fn parse_json(content: &str, path: String) -> KafkaConfigResult<HashMap<String, String>> {
    #[cfg(feature = "config")]
    {
        serde_json::from_str(content).map_err(|e| KafkaConfigError::ParseError {
            path,
            message: e.to_string(),
        })
    }
    #[cfg(not(feature = "config"))]
    {
        let _ = (content, path);
        Err(KafkaConfigError::UnsupportedFormat {
            ext: "json — enable the `config` feature".to_string(),
        })
    }
}

// ============================================================================
// Merge Helper
// ============================================================================

/// Merge profile defaults with user overrides.
///
/// Starts with `profile` defaults, then applies `overrides` on top.
/// User overrides always win.
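///
/// # Examples
///
/// ```
/// use std::collections::HashMap;
/// use hyperi_rustlib::kafka_config::{merge_with_overrides, PRODUCER_PRODUCTION};
///
/// let mut overrides = HashMap::new();
/// overrides.insert("linger.ms".to_string(), "5".to_string());
///
/// let merged = merge_with_overrides(PRODUCER_PRODUCTION, &overrides);
/// assert_eq!(merged["linger.ms"], "5");           // override wins
/// assert_eq!(merged["compression.type"], "zstd"); // profile default kept
/// ```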
#[must_use]
pub fn merge_with_overrides<S: std::hash::BuildHasher>(
    profile: &[(&str, &str)],
    overrides: &HashMap<String, String, S>,
) -> HashMap<String, String> {
    let mut config = HashMap::with_capacity(profile.len() + overrides.len());

    for (key, value) in profile {
        config.insert((*key).to_string(), (*value).to_string());
    }
    for (key, value) in overrides {
        config.insert(key.clone(), value.clone());
    }

    config
}

// ============================================================================
// Consumer Profiles
// ============================================================================

/// Production consumer baseline — lean, only non-defaults.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `partition.assignment.strategy` | `cooperative-sticky` | `range,roundrobin` | KIP-429: avoids stop-the-world rebalances |
/// | `fetch.min.bytes` | 1 MiB | 1 byte | Batch fetches for throughput |
/// | `fetch.wait.max.ms` | 100 ms | 500 ms | Bound latency when fetch.min.bytes not met |
/// | `queued.min.messages` | 20000 | 100000 | 10-20K batches are most efficient |
/// | `enable.auto.commit` | false | true | DFE services manage offset commits |
/// | `statistics.interval.ms` | 1000 ms | 0 (disabled) | Enable Prometheus metrics |
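///
/// A sketch of feeding this profile into an `rdkafka` `ClientConfig`
/// (`rdkafka` is assumed here for illustration; it is not a dependency of
/// this module):
///
/// ```rust,ignore
/// use rdkafka::config::ClientConfig;
/// use hyperi_rustlib::kafka_config::{merge_with_overrides, CONSUMER_PRODUCTION};
///
/// let overrides = std::collections::HashMap::new();
/// let mut client_config = ClientConfig::new();
/// for (key, value) in merge_with_overrides(CONSUMER_PRODUCTION, &overrides) {
///     client_config.set(key, value);
/// }
/// ```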
pub const CONSUMER_PRODUCTION: &[(&str, &str)] = &[
    ("partition.assignment.strategy", "cooperative-sticky"),
    ("fetch.min.bytes", "1048576"),
    ("fetch.wait.max.ms", "100"),
    ("queued.min.messages", "20000"),
    ("enable.auto.commit", "false"),
    ("statistics.interval.ms", "1000"),
];

/// Development/test consumer baseline — fast iteration, low memory.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `partition.assignment.strategy` | `cooperative-sticky` | `range,roundrobin` | Consistent across environments |
/// | `queued.min.messages` | 1000 | 100000 | Lower memory for dev machines |
/// | `enable.auto.commit` | false | true | DFE services manage commits |
/// | `reconnect.backoff.ms` | 10 ms | 100 ms | Fast reconnect for quick iteration |
/// | `reconnect.backoff.max.ms` | 100 ms | 10000 ms | Cap quickly |
/// | `log.connection.close` | true | false | Debug-friendly |
/// | `statistics.interval.ms` | 1000 ms | 0 (disabled) | Enable metrics even in dev |
pub const CONSUMER_DEVTEST: &[(&str, &str)] = &[
    ("partition.assignment.strategy", "cooperative-sticky"),
    ("queued.min.messages", "1000"),
    ("enable.auto.commit", "false"),
    ("reconnect.backoff.ms", "10"),
    ("reconnect.backoff.max.ms", "100"),
    ("log.connection.close", "true"),
    ("statistics.interval.ms", "1000"),
];

/// Low-latency consumer — minimal fetch delay.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `partition.assignment.strategy` | `cooperative-sticky` | `range,roundrobin` | Consistent across envs |
/// | `fetch.wait.max.ms` | 10 ms | 500 ms | Return quickly |
/// | `queued.min.messages` | 1000 | 100000 | Smaller pre-fetch queue |
/// | `enable.auto.commit` | false | true | DFE manages commits |
/// | `reconnect.backoff.ms` | 10 ms | 100 ms | Fast reconnect |
/// | `reconnect.backoff.max.ms` | 100 ms | 10000 ms | Cap quickly |
/// | `statistics.interval.ms` | 1000 ms | 0 | Enable metrics |
pub const CONSUMER_LOW_LATENCY: &[(&str, &str)] = &[
    ("partition.assignment.strategy", "cooperative-sticky"),
    ("fetch.wait.max.ms", "10"),
    ("queued.min.messages", "1000"),
    ("enable.auto.commit", "false"),
    ("reconnect.backoff.ms", "10"),
    ("reconnect.backoff.max.ms", "100"),
    ("statistics.interval.ms", "1000"),
];

// ============================================================================
// Producer Profiles
// ============================================================================

/// Production producer baseline — high throughput, zstd compression.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `linger.ms` | 100 ms | 5 ms | Accumulate larger batches |
/// | `compression.type` | zstd | none | Best ratio with good CPU |
/// | `socket.nagle.disable` | true | false | Kafka batches at app level |
/// | `statistics.interval.ms` | 1000 ms | 0 (disabled) | Enable Prometheus metrics |
pub const PRODUCER_PRODUCTION: &[(&str, &str)] = &[
    ("linger.ms", "100"),
    ("compression.type", "zstd"),
    ("socket.nagle.disable", "true"),
    ("statistics.interval.ms", "1000"),
];

/// Exactly-once producer — idempotence + ordering.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `enable.idempotence` | true | false | Exactly-once within partition |
/// | `acks` | all | all (-1) | Invariant for EOS (explicit) |
/// | `max.in.flight.requests.per.connection` | 5 | 1000000 | Max for idempotent producer |
/// | `linger.ms` | 20 ms | 5 ms | Moderate batching |
/// | `compression.type` | zstd | none | Best ratio |
/// | `socket.nagle.disable` | true | false | Kafka batches at app level |
/// | `statistics.interval.ms` | 1000 ms | 0 | Enable metrics |
pub const PRODUCER_EXACTLY_ONCE: &[(&str, &str)] = &[
    ("enable.idempotence", "true"),
    ("acks", "all"),
    ("max.in.flight.requests.per.connection", "5"),
    ("linger.ms", "20"),
    ("compression.type", "zstd"),
    ("socket.nagle.disable", "true"),
    ("statistics.interval.ms", "1000"),
];

/// Low-latency producer — minimal delay, leader-ack only.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `acks` | 1 | all (-1) | Leader ack only for speed |
/// | `linger.ms` | 0 ms | 5 ms | Send immediately |
/// | `compression.type` | lz4 | none | LZ4 is fastest codec |
/// | `socket.nagle.disable` | true | false | No TCP coalescing |
/// | `statistics.interval.ms` | 1000 ms | 0 | Enable metrics |
pub const PRODUCER_LOW_LATENCY: &[(&str, &str)] = &[
    ("acks", "1"),
    ("linger.ms", "0"),
    ("compression.type", "lz4"),
    ("socket.nagle.disable", "true"),
    ("statistics.interval.ms", "1000"),
];

/// DevTest producer — fast acks, no compression.
///
/// | Setting | Value | librdkafka Default | Why |
/// |---|---|---|---|
/// | `acks` | 1 | all (-1) | Faster for dev |
/// | `socket.nagle.disable` | true | false | No TCP coalescing |
/// | `statistics.interval.ms` | 1000 ms | 0 | Enable metrics in dev |
pub const PRODUCER_DEVTEST: &[(&str, &str)] = &[
    ("acks", "1"),
    ("socket.nagle.disable", "true"),
    ("statistics.interval.ms", "1000"),
];

// ============================================================================
// DFE Source Convention
// ============================================================================

/// Default topic suffix for landing zone (raw ingest).
pub const TOPIC_SUFFIX_LAND: &str = "_land";

/// Default topic suffix for load-ready data (post-transform).
pub const TOPIC_SUFFIX_LOAD: &str = "_load";

/// DFE service role — determines consumer group naming convention.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ServiceRole {
    /// Transform services (middleware): CG = `dfe-{service}-{source}`.
    ///
    /// Transforms sit between `_land` and `_load` topics. Each source gets
    /// its own consumer group so multiple transform pipelines don't compete.
    Transform,

    /// Universal consumers (loader, archiver): CG = `dfe-{service}`.
    ///
    /// Universal services consume from whatever topics are configured or
    /// auto-discovered. The source name is not part of the consumer group.
    Universal,
}

/// DFE source-aware topic naming for transform services.
///
/// All DFE data flows follow the same topology:
///
/// ```text
/// receiver -> {source}_land -> transform -> {source}_load -> loader -> ClickHouse
/// ```
///
/// `DfeSource` is for **transform services** (middleware) that sit between
/// `_land` and `_load`. It derives input/output topic names and source-scoped
/// consumer group IDs from a source name.
///
/// Terminal consumers (loader, archiver) do not use `DfeSource` — they
/// consume from whatever topics are configured or auto-discovered, and their
/// consumer group is simply `dfe-{service}` without a source component.
///
/// # Examples
///
/// ```
/// use hyperi_rustlib::kafka_config::{DfeSource, ServiceRole};
///
/// let source = DfeSource::new("syslog");
/// assert_eq!(source.input_topic(), "syslog_land");
/// assert_eq!(source.output_topic(), "syslog_load");
///
/// // Transform: CG includes source name
/// assert_eq!(
///     source.consumer_group("transform-vector", ServiceRole::Transform, None, None).unwrap(),
///     "dfe-transform-vector-syslog"
/// );
///
/// // Terminal: CG is just the service name
/// assert_eq!(
///     source.consumer_group("loader", ServiceRole::Universal, None, None).unwrap(),
///     "dfe-loader"
/// );
///
/// // Override always wins
/// assert_eq!(
///     source.consumer_group("transform-vector", ServiceRole::Transform, None, Some("custom")).unwrap(),
///     "custom"
/// );
///
/// // Transform without source is an error
/// let empty = DfeSource::new("");
/// assert!(empty.consumer_group("transform-vector", ServiceRole::Transform, None, None).is_err());
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DfeSource {
    name: String,
    land_suffix: String,
    load_suffix: String,
}

impl DfeSource {
    /// Create a new source with default suffixes (`_land`, `_load`).
    #[must_use]
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            name: name.into(),
            land_suffix: TOPIC_SUFFIX_LAND.to_string(),
            load_suffix: TOPIC_SUFFIX_LOAD.to_string(),
        }
    }

    /// Create a source with custom suffixes.
    #[must_use]
    pub fn with_suffixes(
        name: impl Into<String>,
        land_suffix: impl Into<String>,
        load_suffix: impl Into<String>,
    ) -> Self {
        Self {
            name: name.into(),
            land_suffix: land_suffix.into(),
            load_suffix: load_suffix.into(),
        }
    }

    /// Source name (e.g. `"syslog"`, `"netflow"`).
    #[must_use]
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Landing zone topic: `{source}_land`.
    #[must_use]
    pub fn input_topic(&self) -> String {
        format!("{}{}", self.name, self.land_suffix)
    }

    /// Load-ready topic: `{source}_load`.
    #[must_use]
    pub fn output_topic(&self) -> String {
        format!("{}{}", self.name, self.load_suffix)
    }

    /// Consumer group ID following DFE naming conventions.
    ///
    /// The `cg_override` takes precedence when set — use it when the operator
    /// explicitly configures a consumer group in YAML/env.
    ///
    /// When `cg_override` is `None`, the default pattern depends on the
    /// service role:
    ///
    /// | Role | Pattern | Example |
    /// |------|---------|---------|
    /// | Transform | `dfe-{service}-{source}` | `dfe-transform-vector-syslog` |
    /// | Universal (loader, archiver) | `dfe-{service}` | `dfe-loader` |
    ///
    /// For transforms, `pipeline` overrides the source component in the CG
    /// (e.g. `syslog-enriched` instead of `syslog`). Either the `DfeSource`
    /// name or `pipeline` must be non-empty — a bare service name is never
    /// valid for transforms because multiple pipelines would compete.
    ///
    /// # Errors
    ///
    /// Returns `Err` if the role is `Transform` and neither the source name
    /// nor `pipeline` provides a non-empty suffix.
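    ///
    /// # Examples
    ///
    /// A `pipeline` value replaces the source component of the CG:
    ///
    /// ```
    /// use hyperi_rustlib::kafka_config::{DfeSource, ServiceRole};
    ///
    /// let source = DfeSource::new("syslog");
    /// let cg = source
    ///     .consumer_group("transform-vector", ServiceRole::Transform, Some("syslog-enriched"), None)
    ///     .unwrap();
    /// assert_eq!(cg, "dfe-transform-vector-syslog-enriched");
    /// ```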
    pub fn consumer_group(
        &self,
        service: &str,
        role: ServiceRole,
        pipeline: Option<&str>,
        cg_override: Option<&str>,
    ) -> Result<String, KafkaConfigError> {
        if let Some(cg) = cg_override {
            return Ok(cg.to_string());
        }

        match role {
            ServiceRole::Transform => {
                // An explicitly empty pipeline falls back to the source name,
                // per the documented contract that either the source or the
                // pipeline may supply the CG suffix.
                let suffix = pipeline.filter(|p| !p.is_empty()).unwrap_or(&self.name);
                if suffix.is_empty() {
                    return Err(KafkaConfigError::ParseError {
                        path: String::new(),
                        message: format!(
                            "transform service '{service}' requires a source or pipeline \
                             name for its consumer group — a bare 'dfe-{service}' CG would \
                             cause multiple pipelines to compete for messages"
                        ),
                    });
                }
                Ok(format!("dfe-{service}-{suffix}"))
            }
            ServiceRole::Universal => Ok(format!("dfe-{service}")),
        }
    }

    /// Derive the source name from a topic by stripping known suffixes.
    ///
    /// Returns `None` if the topic doesn't end with a known suffix.
    ///
    /// ```
    /// use hyperi_rustlib::kafka_config::DfeSource;
    ///
    /// assert_eq!(DfeSource::source_from_topic("syslog_land"), Some("syslog"));
    /// assert_eq!(DfeSource::source_from_topic("netflow_load"), Some("netflow"));
    /// assert_eq!(DfeSource::source_from_topic("unknown"), None);
    /// ```
    #[must_use]
    pub fn source_from_topic(topic: &str) -> Option<&str> {
        topic
            .strip_suffix(TOPIC_SUFFIX_LAND)
            .or_else(|| topic.strip_suffix(TOPIC_SUFFIX_LOAD))
    }
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn consumer_production_only_non_defaults() {
        assert_eq!(CONSUMER_PRODUCTION.len(), 6);
        let map: HashMap<&str, &str> = CONSUMER_PRODUCTION.iter().copied().collect();
        assert_eq!(map["partition.assignment.strategy"], "cooperative-sticky");
        assert_eq!(map["fetch.min.bytes"], "1048576");
        assert_eq!(map["fetch.wait.max.ms"], "100");
        assert_eq!(map["queued.min.messages"], "20000");
        assert_eq!(map["enable.auto.commit"], "false");
        assert_eq!(map["statistics.interval.ms"], "1000");
    }

    #[test]
    fn producer_production_only_non_defaults() {
        assert_eq!(PRODUCER_PRODUCTION.len(), 4);
        let map: HashMap<&str, &str> = PRODUCER_PRODUCTION.iter().copied().collect();
        assert_eq!(map["linger.ms"], "100");
        assert_eq!(map["compression.type"], "zstd");
        assert_eq!(map["socket.nagle.disable"], "true");
        assert_eq!(map["statistics.interval.ms"], "1000");
    }

    #[test]
    fn merge_user_overrides_win() {
        let mut overrides = HashMap::new();
        overrides.insert("fetch.min.bytes".to_string(), "2097152".to_string());
        overrides.insert("custom.setting".to_string(), "value".to_string());

        let merged = merge_with_overrides(CONSUMER_PRODUCTION, &overrides);

        assert_eq!(merged["fetch.min.bytes"], "2097152");
        assert_eq!(merged["custom.setting"], "value");
        assert_eq!(
            merged["partition.assignment.strategy"],
            "cooperative-sticky"
        );
    }

    #[test]
    fn merge_empty_overrides_returns_profile() {
        let overrides = HashMap::new();
        let merged = merge_with_overrides(CONSUMER_PRODUCTION, &overrides);
        assert_eq!(merged.len(), CONSUMER_PRODUCTION.len());
    }

    #[test]
    fn merge_empty_profile_returns_overrides() {
        let mut overrides = HashMap::new();
        overrides.insert("key".to_string(), "value".to_string());
        let merged = merge_with_overrides(&[], &overrides);
        assert_eq!(merged.len(), 1);
        assert_eq!(merged["key"], "value");
    }

    #[test]
    fn properties_str_basic() {
        let content = "\
# This is a comment
bootstrap.servers=kafka1:9092,kafka2:9092
security.protocol=SASL_SSL
sasl.mechanism=SCRAM-SHA-512
! Another comment style
";
        let config = config_from_properties_str(content);
        assert_eq!(config.len(), 3);
        assert_eq!(config["bootstrap.servers"], "kafka1:9092,kafka2:9092");
        assert_eq!(config["security.protocol"], "SASL_SSL");
        assert_eq!(config["sasl.mechanism"], "SCRAM-SHA-512");
    }

    #[test]
    fn properties_str_value_with_equals() {
        let content = "ssl.certificate.pem=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMI==\n";
        let config = config_from_properties_str(content);
        assert_eq!(
            config["ssl.certificate.pem"],
            "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMI=="
        );
    }

    #[test]
    fn properties_str_empty_and_whitespace() {
        let config = config_from_properties_str("   \n# comment\n\n");
        assert!(config.is_empty());
    }

    #[test]
    fn config_from_file_properties() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("kafka.properties");
        std::fs::write(
            &path,
            "bootstrap.servers=kafka:9092\ncompression.type=zstd\n",
        )
        .unwrap();

        let config = config_from_file(&path).unwrap();
        assert_eq!(config["bootstrap.servers"], "kafka:9092");
        assert_eq!(config["compression.type"], "zstd");
    }

    #[test]
    fn config_from_file_not_found() {
        let result = config_from_file("/nonexistent/kafka.properties");
        assert!(matches!(result, Err(KafkaConfigError::FileNotFound { .. })));
    }

    // ===================================================================
    // DfeSource tests
    // ===================================================================

    #[test]
    fn dfe_source_default_topics() {
        let source = DfeSource::new("syslog");
        assert_eq!(source.name(), "syslog");
        assert_eq!(source.input_topic(), "syslog_land");
        assert_eq!(source.output_topic(), "syslog_load");
    }

    #[test]
    fn dfe_source_custom_suffixes() {
        let source = DfeSource::with_suffixes("auth", "_raw", "_enriched");
        assert_eq!(source.input_topic(), "auth_raw");
        assert_eq!(source.output_topic(), "auth_enriched");
    }

    #[test]
    fn dfe_source_cg_transform_default() {
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group("transform-vector", ServiceRole::Transform, None, None)
                .unwrap(),
            "dfe-transform-vector-syslog"
        );
    }

    #[test]
    fn dfe_source_cg_transform_with_pipeline() {
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group(
                    "transform-vector",
                    ServiceRole::Transform,
                    Some("syslog-enriched"),
                    None
                )
                .unwrap(),
            "dfe-transform-vector-syslog-enriched"
        );
    }

    #[test]
    fn dfe_source_cg_transform_empty_source_errors() {
        let source = DfeSource::new("");
        assert!(
            source
                .consumer_group("transform-vector", ServiceRole::Transform, None, None)
                .is_err()
        );
    }

    #[test]
    fn dfe_source_cg_transform_empty_source_pipeline_rescues() {
        let source = DfeSource::new("");
        assert_eq!(
            source
                .consumer_group(
                    "transform-vector",
                    ServiceRole::Transform,
                    Some("syslog"),
                    None
                )
                .unwrap(),
            "dfe-transform-vector-syslog"
        );
    }
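
    #[test]
    fn dfe_source_cg_transform_empty_pipeline_falls_back() {
        // Edge case for the documented contract: an explicitly empty pipeline
        // falls back to the source name instead of producing an empty suffix.
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group("transform-vector", ServiceRole::Transform, Some(""), None)
                .unwrap(),
            "dfe-transform-vector-syslog"
        );
    }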

    #[test]
    fn dfe_source_cg_universal() {
        let source = DfeSource::new("netflow");
        assert_eq!(
            source
                .consumer_group("loader", ServiceRole::Universal, None, None)
                .unwrap(),
            "dfe-loader"
        );
    }

    #[test]
    fn dfe_source_cg_universal_ignores_pipeline() {
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group("archiver", ServiceRole::Universal, Some("ignored"), None)
                .unwrap(),
            "dfe-archiver"
        );
    }

    #[test]
    fn dfe_source_cg_override_wins() {
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group(
                    "transform-vector",
                    ServiceRole::Transform,
                    None,
                    Some("my-custom-cg")
                )
                .unwrap(),
            "my-custom-cg"
        );
    }

    #[test]
    fn dfe_source_cg_override_wins_universal() {
        let source = DfeSource::new("syslog");
        assert_eq!(
            source
                .consumer_group(
                    "loader",
                    ServiceRole::Universal,
                    None,
                    Some("custom-loader-cg")
                )
                .unwrap(),
            "custom-loader-cg"
        );
    }

    #[test]
    fn dfe_source_from_topic_land() {
        assert_eq!(DfeSource::source_from_topic("syslog_land"), Some("syslog"));
        assert_eq!(DfeSource::source_from_topic("auth_land"), Some("auth"));
    }

    #[test]
    fn dfe_source_from_topic_load() {
        assert_eq!(DfeSource::source_from_topic("syslog_load"), Some("syslog"));
        assert_eq!(
            DfeSource::source_from_topic("netflow_load"),
            Some("netflow")
        );
    }

    #[test]
    fn dfe_source_from_topic_unknown() {
        assert_eq!(DfeSource::source_from_topic("unknown"), None);
        assert_eq!(DfeSource::source_from_topic("events"), None);
        assert_eq!(DfeSource::source_from_topic(""), None);
    }

    #[test]
    fn dfe_source_from_topic_edge_cases() {
        assert_eq!(DfeSource::source_from_topic("_land"), Some(""));
        assert_eq!(DfeSource::source_from_topic("a_load"), Some("a"));
    }

    #[test]
    fn config_from_file_unsupported_extension() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("kafka.toml");
        std::fs::write(&path, "key = value\n").unwrap();

        let result = config_from_file(&path);
        assert!(matches!(
            result,
            Err(KafkaConfigError::UnsupportedFormat { .. })
        ));
    }
}