1use std::collections::HashMap;
2use std::io::{Read, Write, Seek, SeekFrom};
3use serde::{Deserialize, Serialize};
4use crate::dna::hel::error::HlxError;
5pub use crate::dna::atp::types::{AgentConfig, WorkflowConfig, CrewConfig, ContextConfig, HelixConfig};
6#[cfg(feature = "zstd")]
7use zstd::{Encoder, Decoder};
8#[cfg(feature = "lz4_flex")]
9use lz4_flex::{compress_prepend_size, decompress_size_prepended};
10#[cfg(feature = "flate2")]
11use flate2::{Compression, write::GzEncoder, read::GzDecoder};
12#[cfg(feature = "bincode")]
13use bincode::{serialize, deserialize};
14#[cfg(feature = "crc32fast")]
15use crc32fast::Hasher as Crc32Hasher;
16
17pub const HLXB_MAGIC: &[u8; 4] = b"HLXB";
22pub const HLXB_VERSION: u8 = 1;
23
/// In-memory representation of a flat `.hlxb` configuration file:
/// named sections plus free-form file-level metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HlxbConfig {
    /// Configuration sections keyed by section name (e.g. "database").
    pub sections: HashMap<String, HlxbSection>,
    /// File-level metadata (version, flags, ...) as arbitrary JSON values.
    pub metadata: HashMap<String, serde_json::Value>,
}
32
/// One named section of an [`HlxbConfig`]: key/value properties plus
/// optional per-section metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HlxbSection {
    /// Section properties as arbitrary JSON values (strings, numbers, ...).
    pub properties: HashMap<String, serde_json::Value>,
    /// Optional per-section metadata; omitted from serialized output when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, serde_json::Value>>,
}
42
/// Parsed fixed-size header of the sectioned HLXB layout:
/// magic (4) | version (1) | created_at (8, LE) | section_count (4, LE).
#[derive(Debug)]
pub struct HlxbHeader {
    /// File magic; must equal [`HLXB_MAGIC`] (`b"HLXB"`).
    pub magic: [u8; 4],
    /// Format version; must equal [`HLXB_VERSION`].
    pub version: u8,
    /// Creation time as seconds since the Unix epoch.
    pub created_at: u64,
    /// Number of sections following the header.
    pub section_count: u32,
}
51
/// Discriminant byte identifying each section kind in the sectioned
/// HLXB layout; the `#[repr(u8)]` values are written to disk verbatim.
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
pub enum SectionType {
    /// Agent configurations (`HashMap<String, AgentConfig>`).
    Agents = 0x01,
    /// Workflow configurations.
    Workflows = 0x02,
    /// Crew configurations.
    Crews = 0x03,
    /// Context configurations.
    Contexts = 0x04,
    /// Free-form JSON metadata.
    Metadata = 0x05,
}
62
/// Compression codec applied to a payload. Each variant (other than `None`)
/// is backed by a feature-gated crate: `lz4_flex`, `zstd`, `flate2`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum CompressionAlgorithm {
    /// Store bytes verbatim.
    None,
    /// LZ4 with a size-prepended frame (fast, light compression).
    Lz4,
    /// Zstandard (balanced speed/ratio).
    Zstd,
    /// Gzip via flate2 (widely compatible).
    Gzip,
}
71
72impl CompressionAlgorithm {
73 pub fn select_best(data: &[u8]) -> Self {
75 let size = data.len();
76
77 if size < 1024 {
79 return CompressionAlgorithm::None;
80 }
81
82 if size < 64 * 1024 {
84 return CompressionAlgorithm::Lz4;
85 }
86
87 if size < 1024 * 1024 {
89 return CompressionAlgorithm::Zstd;
90 }
91
92 CompressionAlgorithm::Gzip
94 }
95
96 pub fn get_level(&self) -> u32 {
98 match self {
99 CompressionAlgorithm::None => 0,
100 CompressionAlgorithm::Lz4 => 1, CompressionAlgorithm::Zstd => 3, CompressionAlgorithm::Gzip => 6, }
104 }
105}
106
107pub struct CompressionManager;
109
impl CompressionManager {
    /// Compress `data` with the requested codec.
    ///
    /// Each codec is feature-gated; requesting an algorithm whose crate
    /// feature is disabled returns a feature error instead of silently
    /// falling back to another codec.
    pub fn compress(data: &[u8], algorithm: CompressionAlgorithm) -> Result<Vec<u8>, HlxError> {
        match algorithm {
            // Identity: store the bytes verbatim.
            CompressionAlgorithm::None => Ok(data.to_vec()),
            CompressionAlgorithm::Lz4 => {
                #[cfg(feature = "lz4_flex")]
                {
                    // lz4_flex prepends the uncompressed size to the frame so
                    // decompression can allocate in one shot.
                    Ok(compress_prepend_size(data))
                }
                #[cfg(not(feature = "lz4_flex"))]
                {
                    Err(HlxError::feature_error("lz4_flex", "LZ4 compression requires lz4_flex feature"))
                }
            }
            CompressionAlgorithm::Zstd => {
                #[cfg(feature = "zstd")]
                {
                    // Level comes from `get_level()` (3 for Zstd).
                    let mut encoder = Encoder::new(Vec::new(), algorithm.get_level() as i32)
                        .map_err(|e| HlxError::compression_error(
                            format!("Failed to create ZSTD encoder: {}", e),
                            "Check zstd library"
                        ))?;

                    encoder.write_all(data)
                        .map_err(|e| HlxError::compression_error(
                            format!("ZSTD compression failed: {}", e),
                            "Data may be corrupted"
                        ))?;

                    // `finish()` flushes and returns the underlying Vec.
                    encoder.finish()
                        .map_err(|e| HlxError::compression_error(
                            format!("Failed to finish ZSTD compression: {}", e),
                            "Compression process failed"
                        ))
                }
                #[cfg(not(feature = "zstd"))]
                {
                    Err(HlxError::feature_error("zstd", "ZSTD compression requires zstd feature"))
                }
            }
            CompressionAlgorithm::Gzip => {
                #[cfg(feature = "flate2")]
                {
                    // Level comes from `get_level()` (6 for Gzip).
                    let mut encoder = GzEncoder::new(Vec::new(), Compression::new(algorithm.get_level()));
                    std::io::copy(&mut std::io::Cursor::new(data), &mut encoder)
                        .map_err(|e| HlxError::compression_error(
                            format!("GZIP compression failed: {}", e),
                            "Data may be corrupted"
                        ))?;
                    encoder.finish()
                        .map_err(|e| HlxError::compression_error(
                            format!("Failed to finish GZIP compression: {}", e),
                            "Compression process failed"
                        ))
                }
                #[cfg(not(feature = "flate2"))]
                {
                    Err(HlxError::feature_error("flate2", "GZIP compression requires flate2 feature"))
                }
            }
        }
    }

    /// Inverse of [`Self::compress`]: decompress `data` that was produced by
    /// the SAME `algorithm`. Passing the wrong algorithm yields a
    /// decompression error (or, for `None`, the raw bytes unchanged).
    pub fn decompress(data: &[u8], algorithm: CompressionAlgorithm) -> Result<Vec<u8>, HlxError> {
        match algorithm {
            CompressionAlgorithm::None => Ok(data.to_vec()),
            CompressionAlgorithm::Lz4 => {
                #[cfg(feature = "lz4_flex")]
                {
                    // Reads the size prefix written by `compress_prepend_size`.
                    decompress_size_prepended(data).map_err(|e| {
                        HlxError::decompression_error(
                            format!("LZ4 decompression failed: {}", e),
                            "Data may be corrupted or compressed with different settings"
                        )
                    })
                }
                #[cfg(not(feature = "lz4_flex"))]
                {
                    Err(HlxError::feature_error("lz4_flex", "LZ4 decompression requires lz4_flex feature"))
                }
            }
            CompressionAlgorithm::Zstd => {
                #[cfg(feature = "zstd")]
                {
                    let mut decoder = Decoder::new(data)
                        .map_err(|e| HlxError::decompression_error(
                            format!("Failed to create ZSTD decoder: {}", e),
                            "Data may be corrupted"
                        ))?;

                    let mut decompressed = Vec::new();
                    decoder.read_to_end(&mut decompressed)
                        .map_err(|e| HlxError::decompression_error(
                            format!("ZSTD decompression failed: {}", e),
                            "Data may be corrupted or compressed with incompatible settings"
                        ))?;
                    Ok(decompressed)
                }
                #[cfg(not(feature = "zstd"))]
                {
                    Err(HlxError::feature_error("zstd", "ZSTD decompression requires zstd feature"))
                }
            }
            CompressionAlgorithm::Gzip => {
                #[cfg(feature = "flate2")]
                {
                    let mut decoder = GzDecoder::new(data);
                    let mut decompressed = Vec::new();
                    decoder.read_to_end(&mut decompressed)
                        .map_err(|e| HlxError::decompression_error(
                            format!("GZIP decompression failed: {}", e),
                            "Data may be corrupted or compressed with different settings"
                        ))?;
                    Ok(decompressed)
                }
                #[cfg(not(feature = "flate2"))]
                {
                    Err(HlxError::feature_error("flate2", "GZIP decompression requires flate2 feature"))
                }
            }
        }
    }

    /// Empirically pick the codec with the best compression ratio by actually
    /// compressing `data` with every available algorithm.
    ///
    /// Codecs whose feature is disabled fail `compress` and are skipped. If
    /// nothing beat a ratio of 1.0 but the data is large enough to be worth
    /// compressing, fall back to LZ4 when available.
    pub fn benchmark_and_select(data: &[u8]) -> CompressionAlgorithm {
        // Tiny payloads are never worth the codec overhead.
        if data.len() < 1024 {
            return CompressionAlgorithm::None;
        }

        let algorithms = [
            CompressionAlgorithm::Lz4,
            CompressionAlgorithm::Zstd,
            CompressionAlgorithm::Gzip,
        ];

        let mut best_algorithm = CompressionAlgorithm::None;
        let mut best_ratio = 1.0;

        for algorithm in algorithms.iter() {
            if let Ok(compressed) = Self::compress(data, *algorithm) {
                // Smaller ratio == better compression.
                let ratio = compressed.len() as f64 / data.len() as f64;
                if ratio < best_ratio {
                    best_ratio = ratio;
                    best_algorithm = *algorithm;
                }
            }
        }

        if best_algorithm == CompressionAlgorithm::None && data.len() >= 1024 {
            // No codec improved on raw storage (or all were unavailable);
            // prefer cheap LZ4 if compiled in, otherwise store raw.
            #[cfg(feature = "lz4_flex")]
            { CompressionAlgorithm::Lz4 }
            #[cfg(not(feature = "lz4_flex"))]
            { CompressionAlgorithm::None }
        } else {
            best_algorithm
        }
    }
}
272
/// Parsed per-section header of the sectioned HLXB layout:
/// type (1) | uncompressed_size (8, LE) | compressed_size (8, LE) | crc32 (4, LE).
#[derive(Debug)]
pub struct SectionHeader {
    /// Which payload kind follows (agents, workflows, ...).
    pub section_type: SectionType,
    /// Payload size after decompression, used as an integrity check.
    pub uncompressed_size: u64,
    /// Payload size as stored on disk.
    pub compressed_size: u64,
    /// CRC32 of the *compressed* payload bytes.
    pub crc32_checksum: u32,
}
281
/// Streaming writer for the sectioned HLXB layout. Requires `Seek` so
/// `finalize` can back-patch the section count in the header.
pub struct HlxbWriter<W: Write + Seek> {
    /// Underlying sink.
    writer: W,
    /// Sections written so far; patched into the header by `finalize`.
    section_count: u32,
}
287
288impl<W: Write + Seek> HlxbWriter<W> {
289 pub fn new(writer: W) -> Self {
290 Self { writer, section_count: 0 }
291 }
292
293 pub fn write_header(&mut self) -> Result<(), HlxError> {
294 self.writer.write_all(HLXB_MAGIC)?;
296
297 self.writer.write_all(&[HLXB_VERSION])?;
299
300 let timestamp = std::time::SystemTime::now()
302 .duration_since(std::time::UNIX_EPOCH)
303 .unwrap()
304 .as_secs();
305 self.writer.write_all(×tamp.to_le_bytes())?;
306
307 self.writer.write_all(&0u32.to_le_bytes())?;
309
310 Ok(())
311 }
312
313 fn write_section<T: Serialize>(&mut self, section_type: SectionType, data: &T) -> Result<(), HlxError> {
315 #[cfg(not(feature = "bincode"))]
316 return Err(HlxError::feature_error("bincode", "Binary serialization requires bincode feature"));
317
318 #[cfg(feature = "bincode")]
319 {
320 let serialized_data = serialize(data)
322 .map_err(|e| HlxError::serialization_error(
323 format!("Failed to serialize section data: {}", e),
324 "Check data structure"
325 ))?;
326
327 let uncompressed_size = serialized_data.len() as u64;
328
329 #[cfg(not(feature = "zstd"))]
331 let compressed_data = serialized_data.clone();
332 #[cfg(feature = "zstd")]
333 let compressed_data = {
334 let mut encoder = Encoder::new(Vec::new(), 3)
335 .map_err(|e| HlxError::compression_error(
336 format!("Failed to create ZSTD encoder: {}", e),
337 "Check zstd library"
338 ))?;
339 encoder.write_all(&serialized_data)
340 .map_err(|e| HlxError::compression_error(
341 format!("Failed to compress data: {}", e),
342 "Data may be corrupted"
343 ))?;
344 encoder.finish()
345 .map_err(|e| HlxError::compression_error(
346 format!("Failed to finish compression: {}", e),
347 "Compression process failed"
348 ))?
349 };
350
351 let compressed_size = compressed_data.len() as u64;
352
353 #[cfg(not(feature = "crc32fast"))]
355 let checksum = 0u32;
356 #[cfg(feature = "crc32fast")]
357 let checksum = {
358 let mut hasher = Crc32Hasher::new();
359 hasher.update(&compressed_data);
360 hasher.finalize()
361 };
362
363 self.writer.write_all(&[section_type as u8])?;
365 self.writer.write_all(&uncompressed_size.to_le_bytes())?;
366 self.writer.write_all(&compressed_size.to_le_bytes())?;
367 self.writer.write_all(&checksum.to_le_bytes())?;
368
369 self.writer.write_all(&compressed_data)?;
371
372 self.section_count += 1;
373
374 Ok(())
375 }
376 }
377
378 pub fn write_agents(&mut self, agents: &HashMap<String, AgentConfig>) -> Result<(), HlxError> {
379 self.write_section(SectionType::Agents, agents)
380 }
381
382 pub fn write_workflows(&mut self, workflows: &HashMap<String, WorkflowConfig>) -> Result<(), HlxError> {
383 self.write_section(SectionType::Workflows, workflows)
384 }
385
386 pub fn write_crews(&mut self, crews: &HashMap<String, CrewConfig>) -> Result<(), HlxError> {
387 self.write_section(SectionType::Crews, crews)
388 }
389
390 pub fn write_contexts(&mut self, contexts: &HashMap<String, ContextConfig>) -> Result<(), HlxError> {
391 self.write_section(SectionType::Contexts, contexts)
392 }
393
394 pub fn write_metadata(&mut self, metadata: &HashMap<String, serde_json::Value>) -> Result<(), HlxError> {
395 self.write_section(SectionType::Metadata, metadata)
396 }
397
398 pub fn finalize(&mut self) -> Result<(), HlxError> {
399 let section_count_pos = HLXB_MAGIC.len() + 1 + 8; self.writer.seek(SeekFrom::Start(section_count_pos as u64))?;
402
403 self.writer.write_all(&self.section_count.to_le_bytes())?;
405
406 Ok(())
407 }
408}
409
/// Streaming reader for the sectioned HLXB layout (counterpart of
/// [`HlxbWriter`]).
pub struct HlxbReader<R: Read + Seek> {
    /// Underlying source.
    reader: R,
}
414
impl<R: Read + Seek> HlxbReader<R> {
    /// Wrap a source; nothing is read until a `read_*` method is called.
    pub fn new(reader: R) -> Self {
        Self { reader }
    }

    /// Read and validate the fixed file header, rejecting a bad magic number
    /// or a version other than [`HLXB_VERSION`].
    pub fn read_header(&mut self) -> Result<HlxbHeader, HlxError> {
        let mut magic = [0u8; 4];
        self.reader.read_exact(&mut magic)?;

        if magic != *HLXB_MAGIC {
            return Err(HlxError::validation_error(
                "Invalid HLXB magic number",
                "File does not appear to be a valid .hlxb config file"
            ));
        }

        let mut version = [0u8; 1];
        self.reader.read_exact(&mut version)?;

        if version[0] != HLXB_VERSION {
            return Err(HlxError::validation_error(
                format!("Unsupported HLXB version: {} (expected {})", version[0], HLXB_VERSION),
                "File was created with an incompatible version of Helix"
            ));
        }

        // Creation timestamp: seconds since the Unix epoch, little-endian.
        let mut timestamp_bytes = [0u8; 8];
        self.reader.read_exact(&mut timestamp_bytes)?;
        let created_at = u64::from_le_bytes(timestamp_bytes);

        // Section count back-patched by the writer's `finalize`.
        let mut section_count_bytes = [0u8; 4];
        self.reader.read_exact(&mut section_count_bytes)?;
        let section_count = u32::from_le_bytes(section_count_bytes);

        Ok(HlxbHeader {
            magic,
            version: version[0],
            created_at,
            section_count,
        })
    }

    /// Read one per-section header:
    /// `[type:u8][uncompressed:u64][compressed:u64][crc32:u32]`.
    fn read_section_header(&mut self) -> Result<SectionHeader, HlxError> {
        let mut section_type_byte = [0u8; 1];
        self.reader.read_exact(&mut section_type_byte)?;

        // Map the on-disk discriminant back to the enum; unknown bytes are a
        // hard error since we cannot know how to skip or parse the payload.
        let section_type = match section_type_byte[0] {
            0x01 => SectionType::Agents,
            0x02 => SectionType::Workflows,
            0x03 => SectionType::Crews,
            0x04 => SectionType::Contexts,
            0x05 => SectionType::Metadata,
            _ => return Err(HlxError::validation_error(
                format!("Unknown section type: 0x{:02x}", section_type_byte[0]),
                "File contains an unsupported section type"
            )),
        };

        let mut uncompressed_size_bytes = [0u8; 8];
        self.reader.read_exact(&mut uncompressed_size_bytes)?;
        let uncompressed_size = u64::from_le_bytes(uncompressed_size_bytes);

        let mut compressed_size_bytes = [0u8; 8];
        self.reader.read_exact(&mut compressed_size_bytes)?;
        let compressed_size = u64::from_le_bytes(compressed_size_bytes);

        let mut checksum_bytes = [0u8; 4];
        self.reader.read_exact(&mut checksum_bytes)?;
        let crc32_checksum = u32::from_le_bytes(checksum_bytes);

        Ok(SectionHeader {
            section_type,
            uncompressed_size,
            compressed_size,
            crc32_checksum,
        })
    }

    /// Read one section payload, verify its CRC32 (when crc32fast is
    /// enabled), decompress it (when zstd is enabled — mirroring the
    /// writer's feature-gated choice), and check the decompressed length.
    fn read_section_data(&mut self, header: &SectionHeader) -> Result<Vec<u8>, HlxError> {
        // NOTE(review): `compressed_size` comes straight from the file, so a
        // corrupted or hostile file can request an arbitrarily large
        // allocation here — consider a sanity cap. TODO confirm threat model.
        let mut compressed_data = vec![0u8; header.compressed_size as usize];
        self.reader.read_exact(&mut compressed_data)?;

        // Integrity check over the *compressed* bytes, matching the writer.
        #[cfg(feature = "crc32fast")]
        {
            let mut hasher = Crc32Hasher::new();
            hasher.update(&compressed_data);
            let calculated_checksum = hasher.finalize();
            if calculated_checksum != header.crc32_checksum {
                return Err(HlxError::validation_error(
                    format!("CRC32 checksum mismatch: expected {}, got {}", header.crc32_checksum, calculated_checksum),
                    "File may be corrupted"
                ));
            }
        }

        // Without zstd the writer stored the payload verbatim, so pass it
        // through unchanged; with zstd, decompress.
        #[cfg(not(feature = "zstd"))]
        let decompressed_data = compressed_data;
        #[cfg(feature = "zstd")]
        let decompressed_data = {
            let mut decoder = Decoder::new(&compressed_data[..])
                .map_err(|e| HlxError::decompression_error(
                    format!("Failed to create ZSTD decoder: {}", e),
                    "File may be corrupted"
                ))?;
            let mut decompressed = Vec::new();
            decoder.read_to_end(&mut decompressed)
                .map_err(|e| HlxError::decompression_error(
                    format!("Failed to decompress data: {}", e),
                    "File may be corrupted or compressed with incompatible settings"
                ))?;
            decompressed
        };

        // Second integrity check: the decompressed length must match the
        // size recorded in the section header.
        if decompressed_data.len() != header.uncompressed_size as usize {
            return Err(HlxError::validation_error(
                format!("Decompressed size mismatch: expected {}, got {}", header.uncompressed_size, decompressed_data.len()),
                "File may be corrupted"
            ));
        }

        Ok(decompressed_data)
    }

    /// Read every section and assemble a [`HelixConfig`].
    ///
    /// Metadata sections are parsed for validity but then discarded —
    /// `HelixConfig` has no field for them here. NOTE(review): looks
    /// intentional (binding named `_metadata`) — confirm.
    pub fn read_config(&mut self) -> Result<HelixConfig, HlxError> {
        #[cfg(not(feature = "bincode"))]
        return Err(HlxError::feature_error("bincode", "Binary deserialization requires bincode feature"));

        #[cfg(feature = "bincode")]
        {
            let header = self.read_header()?;

            let mut config = HelixConfig::default();

            // Sections may appear in any order; each overwrites its slot.
            for _ in 0..header.section_count {
                let section_header = self.read_section_header()?;
                let section_data = self.read_section_data(&section_header)?;

                match section_header.section_type {
                    SectionType::Agents => {
                        let agents: HashMap<String, AgentConfig> = deserialize(&section_data)
                            .map_err(|e| HlxError::deserialization_error(
                                format!("Failed to deserialize agents section: {}", e),
                                "Check agents configuration"
                            ))?;
                        config.agents = agents;
                    }
                    SectionType::Workflows => {
                        let workflows: HashMap<String, WorkflowConfig> = deserialize(&section_data)
                            .map_err(|e| HlxError::deserialization_error(
                                format!("Failed to deserialize workflows section: {}", e),
                                "Check workflows configuration"
                            ))?;
                        config.workflows = workflows;
                    }
                    SectionType::Crews => {
                        let crews: HashMap<String, CrewConfig> = deserialize(&section_data)
                            .map_err(|e| HlxError::deserialization_error(
                                format!("Failed to deserialize crews section: {}", e),
                                "Check crews configuration"
                            ))?;
                        config.crews = crews;
                    }
                    SectionType::Contexts => {
                        let contexts: HashMap<String, ContextConfig> = deserialize(&section_data)
                            .map_err(|e| HlxError::deserialization_error(
                                format!("Failed to deserialize contexts section: {}", e),
                                "Check contexts configuration"
                            ))?;
                        config.contexts = contexts;
                    }
                    SectionType::Metadata => {
                        // Validated then dropped (see method docs).
                        let _metadata: HashMap<String, serde_json::Value> = deserialize(&section_data)
                            .map_err(|e| HlxError::deserialization_error(
                                format!("Failed to deserialize metadata section: {}", e),
                                "Check metadata configuration"
                            ))?;
                    }
                }
            }

            Ok(config)
        }
    }
}
613
614pub struct HlxbConfigHandler;
616
617impl HlxbConfigHandler {
618 pub fn read_from_file<P: AsRef<std::path::Path>>(path: P) -> Result<HlxbConfig, HlxError> {
620 let mut file = std::fs::File::open(&path)
621 .map_err(|e| HlxError::io_error(
622 format!("Failed to open HLXB config file: {}", e),
623 format!("Check if file exists: {}", path.as_ref().display())
624 ))?;
625
626 Self::read_from_reader(&mut file)
627 }
628
629 pub fn write_to_file<P: AsRef<std::path::Path>>(config: &HlxbConfig, path: P) -> Result<(), HlxError> {
631 let mut file = std::fs::File::create(&path)
632 .map_err(|e| HlxError::io_error(
633 format!("Failed to create HLXB config file: {}", e),
634 format!("Check write permissions: {}", path.as_ref().display())
635 ))?;
636
637 Self::write_to_writer(config, &mut file)
638 }
639
640 pub fn write_helix_config<W: Write + Seek>(config: &HelixConfig, writer: &mut W) -> Result<(), HlxError> {
642 let mut hlxb_writer = HlxbWriter::new(writer);
643 hlxb_writer.write_header()?;
644
645 hlxb_writer.write_agents(&config.agents)?;
647 hlxb_writer.write_workflows(&config.workflows)?;
648 hlxb_writer.write_crews(&config.crews)?;
649 hlxb_writer.write_contexts(&config.contexts)?;
650
651 hlxb_writer.finalize()
652 }
653
654 pub fn read_helix_config<R: Read + Seek>(reader: &mut R) -> Result<HelixConfig, HlxError> {
656 let mut hlxb_reader = HlxbReader::new(reader);
657 hlxb_reader.read_config()
658 }
659
660 pub fn read_from_reader<R: Read + Seek>(reader: &mut R) -> Result<HlxbConfig, HlxError> {
662 let mut magic = [0u8; 4];
665 reader.read_exact(&mut magic)?;
666 if magic != *HLXB_MAGIC {
667 return Err(HlxError::validation_error(
668 "Invalid HLXB magic number",
669 "File does not appear to be a valid .hlxb config file"
670 ));
671 }
672
673 let mut version = [0u8; 1];
675 reader.read_exact(&mut version)?;
676 if version[0] != HLXB_VERSION {
677 return Err(HlxError::validation_error(
678 format!("Unsupported HLXB version: {}", version[0]),
679 "Only version 1 is supported"
680 ));
681 }
682
683 let mut len_bytes = [0u8; 8];
685 reader.read_exact(&mut len_bytes)?;
686 let data_len = u64::from_le_bytes(len_bytes) as usize;
687
688 let mut compressed_data = vec![0u8; data_len];
690 reader.read_exact(&mut compressed_data)?;
691
692 let algorithm = CompressionAlgorithm::select_best(&compressed_data);
694 let decompressed_data = CompressionManager::decompress(&compressed_data, algorithm)?;
695
696 let json_data = String::from_utf8(decompressed_data)
698 .map_err(|e| HlxError::validation_error(
699 format!("Invalid UTF-8 in HLXB file: {}", e),
700 "File may be corrupted"
701 ))?;
702
703 serde_json::from_str(&json_data)
705 .map_err(|e| HlxError::json_error(
706 format!("Failed to parse HLXB config: {}", e),
707 "Check file format"
708 ))
709 }
710
711 pub fn write_to_writer<W: Write + Seek>(config: &HlxbConfig, writer: &mut W) -> Result<(), HlxError> {
713 writer.write_all(HLXB_MAGIC)?;
715
716 writer.write_all(&[HLXB_VERSION])?;
718
719 let json_data = serde_json::to_string(config)
721 .map_err(|e| HlxError::json_error(e.to_string(), ""))?;
722
723 let algorithm = CompressionAlgorithm::select_best(json_data.as_bytes());
725 let compressed_data = CompressionManager::compress(json_data.as_bytes(), algorithm)?;
726
727 let data_len = compressed_data.len() as u64;
729 writer.write_all(&data_len.to_le_bytes())?;
730
731 writer.write_all(&compressed_data)?;
733
734 Ok(())
735 }
736}
737
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Cursor;

    /// Round-trips a small config through the flat writer/reader pair.
    #[test]
    fn test_hlxb_roundtrip() {
        let mut config = HlxbConfig::default();
        config.metadata.insert("version".to_string(), serde_json::Value::String("1.0".to_string()));

        let mut db_section = HlxbSection::default();
        db_section.properties.insert("host".to_string(), serde_json::Value::String("localhost".to_string()));
        db_section.properties.insert("port".to_string(), serde_json::Value::Number(5432.into()));

        config.sections.insert("database".to_string(), db_section);

        let mut buffer = Vec::new();
        {
            let mut cursor = Cursor::new(&mut buffer);
            HlxbConfigHandler::write_to_writer(&config, &mut cursor).unwrap();
        }

        {
            let mut cursor = Cursor::new(&buffer);
            let read_config = HlxbConfigHandler::read_from_reader(&mut cursor).unwrap();

            assert_eq!(read_config.metadata.get("version").unwrap().as_str().unwrap(), "1.0");
            assert!(read_config.sections.contains_key("database"));

            let db_section = &read_config.sections["database"];
            assert_eq!(db_section.properties.get("host").unwrap().as_str().unwrap(), "localhost");
            assert_eq!(db_section.properties.get("port").unwrap().as_i64().unwrap(), 5432);
        }
    }

    /// Exercises every feature-enabled codec plus the size-based selector.
    #[test]
    fn test_compression_algorithms() {
        let test_data = b"Hello, World! This is a test string for compression algorithms.";
        let large_data = vec![b'A'; 100_000];

        #[cfg(feature = "lz4_flex")]
        {
            let compressed = CompressionManager::compress(test_data, CompressionAlgorithm::Lz4).unwrap();
            let decompressed = CompressionManager::decompress(&compressed, CompressionAlgorithm::Lz4).unwrap();
            assert_eq!(decompressed, test_data);
            println!("✅ LZ4 compression test passed");
        }

        #[cfg(feature = "zstd")]
        {
            let compressed = CompressionManager::compress(test_data, CompressionAlgorithm::Zstd).unwrap();
            let decompressed = CompressionManager::decompress(&compressed, CompressionAlgorithm::Zstd).unwrap();
            assert_eq!(decompressed, test_data);
            println!("✅ ZSTD compression test passed");
        }

        #[cfg(feature = "flate2")]
        {
            let compressed = CompressionManager::compress(test_data, CompressionAlgorithm::Gzip).unwrap();
            let decompressed = CompressionManager::decompress(&compressed, CompressionAlgorithm::Gzip).unwrap();
            assert_eq!(decompressed, test_data);
            println!("✅ GZIP compression test passed");
        }

        let compressed = CompressionManager::compress(test_data, CompressionAlgorithm::None).unwrap();
        let decompressed = CompressionManager::decompress(&compressed, CompressionAlgorithm::None).unwrap();
        assert_eq!(decompressed, test_data);
        println!("✅ No compression test passed");

        // FIX: `test_data` is only 64 bytes — always below the 1 KiB
        // threshold — so `select_best(test_data)` always returns `None` and
        // the original `assert_ne!` could never pass. Use `large_data`,
        // which is big enough to trigger a real codec.
        let algorithm = CompressionAlgorithm::select_best(&large_data);
        assert_ne!(algorithm, CompressionAlgorithm::None);
        println!("✅ Algorithm selection test passed: {:?}", algorithm);

        let small_data = b"small";
        let algorithm = CompressionAlgorithm::select_best(small_data);
        assert_eq!(algorithm, CompressionAlgorithm::None);
        println!("✅ Small data algorithm selection test passed");

        let best_algorithm = CompressionManager::benchmark_and_select(&large_data);
        println!("✅ Benchmark and select test passed: {:?}", best_algorithm);
    }

    /// Round-trips a config large enough to actually be compressed.
    #[test]
    fn test_compressed_hlxb_config_roundtrip() {
        let mut config = HlxbConfig::default();
        config.metadata.insert("version".to_string(), serde_json::Value::String("1.0".to_string()));
        config.metadata.insert("compressed".to_string(), serde_json::Value::Bool(true));

        let db_section = HlxbSection {
            properties: {
                let mut props = HashMap::new();
                props.insert("host".to_string(), serde_json::Value::String("localhost".to_string()));
                props.insert("port".to_string(), serde_json::Value::Number(5432.into()));
                // Repetitive text so the JSON exceeds the compression threshold.
                props.insert("description".to_string(), serde_json::Value::String("A very long description that should benefit from compression. ".repeat(50)));
                props
            },
            metadata: Some({
                let mut meta = HashMap::new();
                meta.insert("created".to_string(), serde_json::Value::String("2024-01-01".to_string()));
                meta
            }),
        };

        config.sections.insert("database".to_string(), db_section);

        let mut buffer = Vec::new();
        {
            let mut cursor = Cursor::new(&mut buffer);
            HlxbConfigHandler::write_to_writer(&config, &mut cursor).unwrap();
        }

        println!("✅ Compressed HLXB config written, size: {} bytes", buffer.len());

        {
            let mut cursor = Cursor::new(&buffer);
            let read_config = HlxbConfigHandler::read_from_reader(&mut cursor).unwrap();

            assert_eq!(read_config.metadata.get("version").unwrap().as_str().unwrap(), "1.0");
            assert_eq!(read_config.metadata.get("compressed").unwrap().as_bool().unwrap(), true);
            assert!(read_config.sections.contains_key("database"));

            let db_section = &read_config.sections["database"];
            assert_eq!(db_section.properties.get("host").unwrap().as_str().unwrap(), "localhost");
            assert_eq!(db_section.properties.get("port").unwrap().as_i64().unwrap(), 5432);
            assert!(db_section.metadata.is_some());
            assert_eq!(db_section.metadata.as_ref().unwrap().get("created").unwrap().as_str().unwrap(), "2024-01-01");
        }

        println!("✅ Compressed HLXB config roundtrip test passed");
    }
}
881
882impl Default for HlxbConfig {
883 fn default() -> Self {
884 Self {
885 sections: HashMap::new(),
886 metadata: HashMap::new(),
887 }
888 }
889}
890
891impl Default for HlxbSection {
892 fn default() -> Self {
893 Self {
894 properties: HashMap::new(),
895 metadata: None,
896 }
897 }
898}