base_d/lib.rs
1//! # base-d
2//!
3//! A universal, multi-dictionary encoding library for Rust.
4//!
5//! Encode binary data using numerous dictionaries including RFC standards, ancient scripts,
6//! emoji, playing cards, and more. Supports three encoding modes: radix (true base
7//! conversion), RFC 4648 chunked encoding, and direct byte-range mapping.
8//!
9//! ## Quick Start
10//!
11//! ```
12//! use base_d::{DictionaryRegistry, Dictionary, encode, decode};
13//!
14//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
15//! // Load built-in dictionaries
16//! let config = DictionaryRegistry::load_default()?;
17//! let base64_config = config.get_dictionary("base64").unwrap();
18//!
19//! // Create dictionary
20//! let chars: Vec<char> = base64_config.chars.chars().collect();
21//! let padding = base64_config.padding.as_ref().and_then(|s| s.chars().next());
22//! let mut builder = Dictionary::builder()
23//! .chars(chars)
24//! .mode(base64_config.effective_mode());
25//! if let Some(p) = padding {
26//! builder = builder.padding(p);
27//! }
28//! let dictionary = builder.build()?;
29//!
30//! // Encode and decode
31//! let data = b"Hello, World!";
32//! let encoded = encode(data, &dictionary);
33//! let decoded = decode(&encoded, &dictionary)?;
34//! assert_eq!(data, &decoded[..]);
35//! # Ok(())
36//! # }
37//! ```
38//!
39//! ## Features
40//!
41//! - **33 Built-in Dictionaries**: RFC standards, emoji, ancient scripts, and more
42//! - **3 Encoding Modes**: Radix, chunked (RFC-compliant), byte-range
43//! - **Streaming Support**: Memory-efficient processing for large files
44//! - **Custom Dictionaries**: Define your own via TOML configuration
45//! - **User Configuration**: Load dictionaries from `~/.config/base-d/dictionaries.toml`
46//! - **SIMD Acceleration**: AVX2/SSSE3 on x86_64, NEON on aarch64 (enabled by default)
47//!
48//! ## Cargo Features
49//!
50//! - `simd` (default): Enable SIMD acceleration for encoding/decoding.
51//! Disable with `--no-default-features` for scalar-only builds.
52//!
53//! ## Encoding Modes
54//!
55//! ### Radix Base Conversion
56//!
57//! True base conversion treating data as a large number. Works with any dictionary size.
58//!
59//! ```
60//! use base_d::{Dictionary, EncodingMode, encode};
61//!
62//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
63//! let chars: Vec<char> = "😀😁😂🤣😃😄😅😆".chars().collect();
64//! let dictionary = Dictionary::builder()
65//! .chars(chars)
66//! .mode(EncodingMode::Radix)
67//! .build()?;
68//!
69//! let encoded = encode(b"Hi", &dictionary);
70//! # Ok(())
71//! # }
72//! ```
73//!
74//! ### Chunked Mode (RFC 4648)
75//!
76//! Fixed-size bit groups, compatible with standard base64/base32.
77//!
78//! ```
79//! use base_d::{Dictionary, EncodingMode, encode};
80//!
81//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
82//! let chars: Vec<char> = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
83//! .chars().collect();
84//! let dictionary = Dictionary::builder()
85//! .chars(chars)
86//! .mode(EncodingMode::Chunked)
87//! .padding('=')
88//! .build()?;
89//!
90//! let encoded = encode(b"Hello", &dictionary);
91//! assert_eq!(encoded, "SGVsbG8=");
92//! # Ok(())
93//! # }
94//! ```
95//!
96//! ### Byte Range Mode
97//!
98//! Direct 1:1 byte-to-emoji mapping. Zero encoding overhead.
99//!
100//! ```
101//! use base_d::{Dictionary, EncodingMode, encode};
102//!
103//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
104//! let dictionary = Dictionary::builder()
105//! .mode(EncodingMode::ByteRange)
106//! .start_codepoint(127991) // U+1F3F7
107//! .build()?;
108//!
109//! let data = b"Hi";
110//! let encoded = encode(data, &dictionary);
111//! assert_eq!(encoded.chars().count(), 2); // 1:1 mapping
112//! # Ok(())
113//! # }
114//! ```
115//!
116//! ## Streaming
117//!
118//! For large files, use streaming to avoid loading entire file into memory:
119//!
120//! ```no_run
121//! use base_d::{DictionaryRegistry, StreamingEncoder};
122//! use std::fs::File;
123//!
124//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
125//! let config = DictionaryRegistry::load_default()?;
126//! let dictionary_config = config.get_dictionary("base64").unwrap();
127//!
128//! // ... create dictionary from config
129//! # let chars: Vec<char> = dictionary_config.chars.chars().collect();
130//! # let padding = dictionary_config.padding.as_ref().and_then(|s| s.chars().next());
131//! # let mut builder = base_d::Dictionary::builder().chars(chars).mode(dictionary_config.effective_mode());
132//! # if let Some(p) = padding { builder = builder.padding(p); }
133//! # let dictionary = builder.build()?;
134//!
135//! let mut input = File::open("large_file.bin")?;
136//! let output = File::create("encoded.txt")?;
137//!
138//! let mut encoder = StreamingEncoder::new(&dictionary, output);
139//! encoder.encode(&mut input)?;
140//! # Ok(())
141//! # }
142//! ```
143
144mod core;
145mod encoders;
146mod features;
147
148#[cfg(feature = "simd")]
149mod simd;
150
151pub mod bench;
152pub mod convenience;
153pub mod prelude;
154pub mod wordlists;
155
156pub use convenience::{
157 CompressEncodeResult, HashEncodeResult, compress_encode, compress_encode_with, hash_encode,
158 hash_encode_with,
159};
160pub use core::alternating_dictionary::AlternatingWordDictionary;
161pub use core::config::{
162 CompressionConfig, DictionaryConfig, DictionaryRegistry, DictionaryType, EncodingMode, Settings,
163};
164pub use core::dictionary::{Dictionary, DictionaryBuilder};
165pub use core::word_dictionary::{WordDictionary, WordDictionaryBuilder};
166pub use encoders::algorithms::{DecodeError, DictionaryNotFoundError, find_closest_dictionary};
167
/// Word-based encoding using radix conversion.
///
/// Same mathematical approach as character-based radix encoding,
/// but outputs words joined by a delimiter instead of concatenated characters.
///
/// NOTE(review): presumably these operate on [`WordDictionary`] rather than
/// [`Dictionary`] — confirm against `encoders::algorithms::word`.
pub mod word {
    // Re-export the word-codec entry points at a stable public path.
    pub use crate::encoders::algorithms::word::{decode, encode};
}
175
/// Alternating word-based encoding for PGP-style biometric word lists.
///
/// Provides direct 1:1 byte-to-word mapping where the dictionary selection
/// alternates based on byte position (e.g., even/odd bytes use different dictionaries).
///
/// NOTE(review): presumably these operate on [`AlternatingWordDictionary`] —
/// confirm against `encoders::algorithms::word_alternating`.
pub mod word_alternating {
    // Re-export the alternating-word codec entry points at a stable public path.
    pub use crate::encoders::algorithms::word_alternating::{decode, encode};
}
183pub use encoders::streaming::{StreamingDecoder, StreamingEncoder};
184
185// Expose schema encoding functions for CLI
186pub use encoders::algorithms::schema::{
187 SchemaCompressionAlgo, decode_fiche, decode_fiche_path, decode_schema, encode_fiche,
188 encode_fiche_ascii, encode_fiche_light, encode_fiche_minified, encode_fiche_path,
189 encode_fiche_readable, encode_markdown_fiche, encode_markdown_fiche_ascii,
190 encode_markdown_fiche_light, encode_markdown_fiche_markdown, encode_markdown_fiche_readable,
191 encode_schema,
192};
193
194// Expose fiche auto-detection
195pub use encoders::algorithms::schema::fiche_analyzer::{DetectedMode, detect_fiche_mode};
196
197/// Schema encoding types and traits for building custom frontends
198///
199/// This module provides the intermediate representation (IR) layer for schema encoding,
200/// allowing library users to implement custom parsers (YAML, CSV, TOML, etc.) and
201/// serializers that leverage the binary encoding backend.
202///
203/// # Architecture
204///
205/// The schema encoding pipeline has three layers:
206///
207/// 1. **Input layer**: Parse custom formats into IR
208/// - Implement `InputParser` trait
209/// - Reference: `JsonParser`
210///
211/// 2. **Binary layer**: Pack/unpack IR to/from binary
212/// - `pack()` - IR to binary bytes
213/// - `unpack()` - Binary bytes to IR
214/// - `encode_framed()` - Binary to display96 with delimiters
215/// - `decode_framed()` - Display96 to binary
216///
217/// 3. **Output layer**: Serialize IR to custom formats
218/// - Implement `OutputSerializer` trait
219/// - Reference: `JsonSerializer`
220///
221/// # Example: Custom CSV Parser
222///
223/// ```ignore
224/// use base_d::schema::{
225/// InputParser, IntermediateRepresentation, SchemaHeader, FieldDef,
226/// FieldType, SchemaValue, SchemaError, pack, encode_framed,
227/// };
228///
229/// struct CsvParser;
230///
231/// impl InputParser for CsvParser {
232/// type Error = SchemaError;
233///
234/// fn parse(input: &str) -> Result<IntermediateRepresentation, Self::Error> {
235/// // Parse CSV headers
236/// let lines: Vec<&str> = input.lines().collect();
237/// let headers: Vec<&str> = lines[0].split(',').collect();
238///
239/// // Infer types and build fields
240/// let fields: Vec<FieldDef> = headers.iter()
241/// .map(|h| FieldDef::new(h.to_string(), FieldType::String))
242/// .collect();
243///
244/// // Parse rows
245/// let row_count = lines.len() - 1;
246/// let mut values = Vec::new();
247/// for line in &lines[1..] {
248/// for cell in line.split(',') {
249/// values.push(SchemaValue::String(cell.to_string()));
250/// }
251/// }
252///
253/// let header = SchemaHeader::new(row_count, fields);
254/// IntermediateRepresentation::new(header, values)
255/// }
256/// }
257///
258/// // Encode CSV to schema format
259/// let csv = "name,age\nalice,30\nbob,25";
260/// let ir = CsvParser::parse(csv)?;
261/// let binary = pack(&ir);
262/// let encoded = encode_framed(&binary);
263/// ```
264///
265/// # IR Structure
266///
267/// The `IntermediateRepresentation` consists of:
268///
269/// * **Header**: Schema metadata
270/// - Field definitions (name + type)
271/// - Row count
272/// - Optional root key
273/// - Optional null bitmap
274///
275/// * **Values**: Flat array in row-major order
276/// - `[row0_field0, row0_field1, row1_field0, row1_field1, ...]`
277///
278/// # Type System
279///
280/// Supported field types:
281///
282/// * `U64` - Unsigned 64-bit integer
283/// * `I64` - Signed 64-bit integer
284/// * `F64` - 64-bit floating point
285/// * `String` - UTF-8 string
286/// * `Bool` - Boolean
287/// * `Null` - Null value
288/// * `Array(T)` - Homogeneous array of type T
289/// * `Any` - Mixed-type values
290///
291/// # Compression
292///
293/// Optional compression algorithms:
294///
295/// * `SchemaCompressionAlgo::Brotli` - Best ratio
296/// * `SchemaCompressionAlgo::Lz4` - Fastest
297/// * `SchemaCompressionAlgo::Zstd` - Balanced
298///
299/// # See Also
300///
301/// * [SCHEMA.md](../SCHEMA.md) - Full format specification
302/// * `encode_schema()` / `decode_schema()` - High-level JSON functions
pub mod schema {
    pub use crate::encoders::algorithms::schema::{
        // IR types: schema header, field definitions, and value variants
        FieldDef,
        FieldType,
        // Traits: implement these to plug in custom input formats
        InputParser,
        IntermediateRepresentation,
        // Reference implementations for JSON input/output
        JsonParser,
        JsonSerializer,
        OutputSerializer,
        // Compression algorithm selection for the binary payload
        SchemaCompressionAlgo,
        // Errors returned by parsing/packing/decoding
        SchemaError,
        SchemaHeader,
        SchemaValue,
        // Binary layer: framed text <-> raw bytes
        decode_framed,
        // High-level API (JSON string <-> encoded string)
        decode_schema,
        encode_framed,
        encode_schema,
        // Binary layer: IR <-> raw bytes
        pack,
        unpack,
    };
}
331pub use features::{
332 CompressionAlgorithm, DictionaryDetector, DictionaryMatch, HashAlgorithm, XxHashConfig,
333 compress, decompress, detect_dictionary, hash, hash_with_config,
334};
335
336/// Encodes binary data using the specified dictionary.
337///
338/// Automatically selects the appropriate encoding strategy based on the
339/// dictionary's mode (Radix, Chunked, or ByteRange).
340///
341/// # Arguments
342///
343/// * `data` - The binary data to encode
344/// * `dictionary` - The dictionary to use for encoding
345///
346/// # Returns
347///
348/// A string containing the encoded data
349///
350/// # Examples
351///
352/// ```
353/// use base_d::{Dictionary, EncodingMode};
354///
355/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
356/// let chars: Vec<char> = "01".chars().collect();
357/// let dictionary = Dictionary::builder()
358/// .chars(chars)
359/// .mode(EncodingMode::Radix)
360/// .build()?;
361/// let encoded = base_d::encode(b"Hi", &dictionary);
362/// # Ok(())
363/// # }
364/// ```
365pub fn encode(data: &[u8], dictionary: &Dictionary) -> String {
366 match dictionary.mode() {
367 EncodingMode::Radix => encoders::algorithms::radix::encode(data, dictionary),
368 EncodingMode::Chunked => encoders::algorithms::chunked::encode_chunked(data, dictionary),
369 EncodingMode::ByteRange => {
370 encoders::algorithms::byte_range::encode_byte_range(data, dictionary)
371 }
372 }
373}
374
375/// Decodes a string back to binary data using the specified dictionary.
376///
377/// Automatically selects the appropriate decoding strategy based on the
378/// dictionary's mode (Radix, Chunked, or ByteRange).
379///
380/// # Arguments
381///
382/// * `encoded` - The encoded string to decode
383/// * `dictionary` - The dictionary used for encoding
384///
385/// # Returns
386///
387/// A `Result` containing the decoded binary data, or a `DecodeError` if
388/// the input is invalid
389///
390/// # Errors
391///
392/// Returns `DecodeError` if:
393/// - The input contains invalid characters
394/// - The input is empty
395/// - The padding is invalid (for chunked mode)
396///
397/// # Examples
398///
399/// ```
400/// use base_d::{Dictionary, EncodingMode, encode, decode};
401///
402/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
403/// let chars: Vec<char> = "01".chars().collect();
404/// let dictionary = Dictionary::builder()
405/// .chars(chars)
406/// .mode(EncodingMode::Radix)
407/// .build()?;
408/// let data = b"Hi";
409/// let encoded = encode(data, &dictionary);
410/// let decoded = decode(&encoded, &dictionary)?;
411/// assert_eq!(data, &decoded[..]);
412/// # Ok(())
413/// # }
414/// ```
415pub fn decode(encoded: &str, dictionary: &Dictionary) -> Result<Vec<u8>, DecodeError> {
416 match dictionary.mode() {
417 EncodingMode::Radix => encoders::algorithms::radix::decode(encoded, dictionary),
418 EncodingMode::Chunked => encoders::algorithms::chunked::decode_chunked(encoded, dictionary),
419 EncodingMode::ByteRange => {
420 encoders::algorithms::byte_range::decode_byte_range(encoded, dictionary)
421 }
422 }
423}
424
425#[cfg(test)]
426mod tests;