// compression_prompt/lib.rs
//! # Compression Core
//!
//! Statistical compression for LLM prompts using intelligent filtering.
//!
//! ## Overview
//!
//! This library reduces token usage in LLM prompts by using statistical analysis
//! to identify and filter less important content while preserving semantic meaning.
//!
//! ## Architecture
//!
//! The compression pipeline:
//! 1. **Tokenize**: Convert input to tokens using pluggable tokenizer
//! 2. **Analyze**: Apply statistical filtering to identify important content
//! 3. **Filter**: Remove less important tokens/segments
//! 4. **Validate**: Ensure compression preserves semantic quality
//!
//! ## Example
//!
//! ```rust,ignore
//! use compression_prompt::{Compressor, CompressorConfig};
//!
//! let config = CompressorConfig::default();
//! let compressor = Compressor::new(config);
//! let result = compressor.compress(input, &tokenizer)?;
//!
//! println!("Saved {} tokens ({:.1}% compression)",
//!     result.original_tokens - result.compressed_tokens,
//!     (1.0 - result.compression_ratio) * 100.0
//! );
//! ```

33#![warn(missing_docs)]
34#![warn(clippy::all)]
35
36pub mod compressor;
37#[cfg(feature = "image")]
38pub mod image_renderer;
39pub mod quality_metrics;
40pub mod statistical_filter;
41
42pub use compressor::{CompressionResult, Compressor, CompressorConfig, OutputFormat};
43#[cfg(feature = "image")]
44pub use image_renderer::{ImageRenderer, ImageRendererConfig};
45pub use statistical_filter::{StatisticalFilter, StatisticalFilterConfig};
46
47/// Library version
48pub const VERSION: &str = env!("CARGO_PKG_VERSION");