pub struct CompactFst<W: Semiring, C: Compactor<W>> { /* private fields */ }

Expand description
Memory-optimized FST implementation with pluggable compression strategies
CompactFst is a specialized FST implementation designed for scenarios where memory
efficiency is the primary concern, even at the cost of some computational overhead.
It uses customizable compression strategies to reduce the memory footprint of large
FSTs, making it suitable for deployment on resource-constrained devices or when
working with exceptionally large automata.
§Design Characteristics
- Compression-First: Prioritizes minimal memory usage over access speed
- Pluggable Compaction: Customizable compression strategies via the Compactor trait
- Trade-off Oriented: Exchanges computational overhead for reduced memory footprint
- Specialization-Ready: Supports domain-specific optimizations through custom compactors
- Immutable Structure: Read-only access pattern for predictable memory usage
§Performance Profile
| Operation | Time Complexity | Memory Overhead | Notes |
|---|---|---|---|
| Arc Access | O(1) + decompression | Minimal | Requires decompression per access |
| State Access | O(1) | Fixed per state | Direct indexing into state array |
| Memory Usage | ~40-70% of VectorFst | Depends on compactor | Significant savings |
| Construction | O(V + E) | Temporary spike | One-time compression cost |
| Cache Performance | Variable | Excellent | Compressed data fits in cache |
§Memory Layout and Compression
CompactFst Memory Structure:
┌─────────────────────────────┐
│ States Array │ ← Vec<CompactState>: metadata per state
│ [State 0: arcs_start, ...] │ - final_weight_idx: Option<u32>
│ [State 1: arcs_start, ...] │ - arcs_start: u32 (data array offset)
│ [State N: arcs_start, ...] │ - num_arcs: u32 (arc count)
└─────────────────────────────┘
┌─────────────────────────────┐
│ Compressed Data Array │ ← Vec<C::Element>: compressed arcs & weights
│ [Compressed Arc 0] │ Compactor-specific format
│ [Compressed Arc 1] │ May pack multiple fields together
│ [Compressed Weight 0] │ Custom compression schemes
│ [...] │
└─────────────────────────────┘

§Compression Strategies
§Default Compression
The DefaultCompactor provides a baseline compression approach:
- Stores arcs and weights in enumerated format
- Maintains full precision of original data
- Suitable for general-purpose usage
§Custom Compression Examples
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, Compactor};
// Example: Custom compactor for small alphabets
#[derive(Debug)]
struct SmallAlphabetCompactor;
impl Compactor<TropicalWeight> for SmallAlphabetCompactor {
type Element = u64; // Pack arc data into single u64
fn compact(&self, arc: &Arc<TropicalWeight>) -> u64 {
// Pack: 16 bits ilabel + 16 bits olabel + 16 bits nextstate + 16 bits weight
let weight_bits = *arc.weight.value() as u64; // Simplified
(arc.ilabel as u64) << 48 |
(arc.olabel as u64) << 32 |
(arc.nextstate as u64) << 16 |
weight_bits
}
fn expand(&self, element: &u64) -> Arc<TropicalWeight> {
let ilabel = (element >> 48) as u32;
let olabel = ((element >> 32) & 0xFFFF) as u32;
let nextstate = ((element >> 16) & 0xFFFF) as u32;
let weight_val = (element & 0xFFFF) as f32;
Arc::new(ilabel, olabel, TropicalWeight::new(weight_val), nextstate)
}
fn compact_weight(&self, weight: &TropicalWeight) -> u64 {
*weight.value() as u64
}
fn expand_weight(&self, element: &u64) -> TropicalWeight {
TropicalWeight::new(*element as f32)
}
}

§Use Cases
§Mobile/Embedded Deployment
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Deploy large language model on mobile device
fn create_mobile_language_model() -> CompactFst<TropicalWeight, DefaultCompactor<TropicalWeight>> {
let base_fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
// Compressed representation reduces memory requirements
// Suitable for devices with limited RAM
base_fst
}
// Memory-conscious processing
fn process_on_mobile_device(
fst: &CompactFst<TropicalWeight, DefaultCompactor<TropicalWeight>>,
input: &[u32]
) {
if let Some(start) = fst.start() {
let mut current = start;
for &label in input {
// Each arc access involves decompression
// But overall memory usage is minimal
for arc in fst.arcs(current) {
if arc.ilabel == label {
current = arc.nextstate;
break;
}
}
}
}
}

§Large-Scale Dictionary Compression
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Compress massive pronunciation dictionary
fn compress_pronunciation_dict(
// Input would be a large VectorFst with millions of entries
) -> CompactFst<LogWeight, DefaultCompactor<LogWeight>> {
// The compaction process would convert from VectorFst
// Achieving 40-60% memory reduction for large dictionaries
let compact_dict = CompactFst::new();
// Compressed dict can fit in memory where uncompressed cannot
compact_dict
}
// Lookup in compressed dictionary
fn lookup_pronunciation(
dict: &CompactFst<LogWeight, DefaultCompactor<LogWeight>>,
word: &str
) -> Vec<String> {
let mut pronunciations = Vec::new();
if let Some(start) = dict.start() {
// Traverse compressed FST
// Decompression happens transparently during access
let mut current = start;
for ch in word.chars() {
for arc in dict.arcs(current) {
if arc.ilabel == ch as u32 {
current = arc.nextstate;
break;
}
}
}
// Extract pronunciations from final states
// (Implementation details omitted for brevity)
}
pronunciations
}

§Cloud Storage Optimization
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Optimize FSTs for cloud storage and transmission
fn optimize_for_cloud_storage() -> CompactFst<ProbabilityWeight, DefaultCompactor<ProbabilityWeight>> {
let compact_fst = CompactFst::new();
// Benefits:
// - Reduced storage costs (smaller files)
// - Faster network transmission
// - Lower bandwidth usage
// - Reduced I/O operations
compact_fst
}
// Efficient batch processing of compressed FSTs
fn batch_process_compressed_fsts(
fsts: &[CompactFst<ProbabilityWeight, DefaultCompactor<ProbabilityWeight>>]
) {
for fst in fsts {
// Process multiple compressed FSTs in memory simultaneously
// Memory efficiency allows larger batch sizes
process_single_fst(fst);
}
}
fn process_single_fst(
fst: &CompactFst<ProbabilityWeight, DefaultCompactor<ProbabilityWeight>>
) {
// FST processing logic
// Compression overhead amortized across batch processing
}

§Memory-Constrained Analysis
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Analyze very large FSTs within memory constraints
fn analyze_large_fst_efficiently(
fst: &CompactFst<BooleanWeight, DefaultCompactor<BooleanWeight>>
) -> AnalysisResult {
let mut result = AnalysisResult::new();
// Memory-efficient traversal
for state in fst.states() {
// Analyze state properties
result.state_count += 1;
// Count arcs with minimal memory overhead
for arc in fst.arcs(state) {
result.arc_count += 1;
// Decompression cost amortized over analysis
if arc.ilabel == 0 {
result.epsilon_count += 1;
}
}
}
result
}
#[derive(Default)]
struct AnalysisResult {
state_count: usize,
arc_count: usize,
epsilon_count: usize,
}
impl AnalysisResult {
fn new() -> Self { Self::default() }
}

§Compactor Implementation Patterns
§Domain-Specific Compression
use arcweight::prelude::*;
use arcweight::fst::{Compactor, CompactFst};
// Example: Pronunciation-specific compactor
#[derive(Debug)]
struct PhonemeCompactor;
impl Compactor<TropicalWeight> for PhonemeCompactor {
type Element = CompactPhoneme;
fn compact(&self, arc: &Arc<TropicalWeight>) -> CompactPhoneme {
// Custom compression for phoneme data
// Could map common phoneme combinations to single values
CompactPhoneme {
phoneme_code: map_to_phoneme_code(arc.ilabel, arc.olabel),
weight_class: quantize_weight(&arc.weight),
next_state: arc.nextstate,
}
}
fn expand(&self, element: &CompactPhoneme) -> Arc<TropicalWeight> {
let (ilabel, olabel) = expand_phoneme_code(element.phoneme_code);
let weight = dequantize_weight(element.weight_class);
Arc::new(ilabel, olabel, weight, element.next_state)
}
fn compact_weight(&self, weight: &TropicalWeight) -> CompactPhoneme {
// Weight-only compression
CompactPhoneme {
phoneme_code: 0,
weight_class: quantize_weight(weight),
next_state: 0,
}
}
fn expand_weight(&self, element: &CompactPhoneme) -> TropicalWeight {
dequantize_weight(element.weight_class)
}
}
#[derive(Clone, Debug)]
struct CompactPhoneme {
phoneme_code: u16, // Compressed phoneme pair
weight_class: u8, // Quantized weight
next_state: u32,
}
fn map_to_phoneme_code(ilabel: u32, olabel: u32) -> u16 {
// Domain-specific compression logic
((ilabel & 0xFF) << 8 | (olabel & 0xFF)) as u16
}
fn expand_phoneme_code(code: u16) -> (u32, u32) {
((code >> 8) as u32, (code & 0xFF) as u32)
}
fn quantize_weight(weight: &TropicalWeight) -> u8 {
// Quantize weight to 256 levels
(weight.value().clamp(0.0, 25.5) * 10.0) as u8
}
fn dequantize_weight(quantized: u8) -> TropicalWeight {
TropicalWeight::new(quantized as f32 / 10.0)
}§Performance Optimization Guidelines
§When to Use CompactFst
- ✅ Memory is severely constrained (embedded systems, mobile devices)
- ✅ Very large FSTs that don’t fit in memory uncompressed
- ✅ Network transmission or storage optimization is critical
- ✅ Batch processing where memory efficiency enables larger batches
- ✅ Long-running applications where compression amortizes over time
§When NOT to Use CompactFst
- ❌ Real-time applications requiring minimal latency
- ❌ Frequent random access patterns
- ❌ Small FSTs where compression overhead exceeds benefits
- ❌ Applications that modify FSTs frequently
- ❌ CPU-constrained environments where decompression is expensive
§Memory vs. Performance Trade-offs
- Compression Ratio: Higher compression = more CPU overhead
- Access Patterns: Sequential access amortizes decompression cost
- Cache Behavior: Compressed data may improve cache hit rates
- Batch Processing: Compression overhead amortized across operations
§Limitations and Considerations
§Current Implementation Limitations
- final_weight() method requires redesign to avoid reference issues
- Limited set of built-in compaction strategies
- No automatic compression strategy selection
- Compression is lossy with some compactors (quantization)
§Design Considerations
- Compactor Choice: Critical for achieving desired compression ratio
- Data Characteristics: Compression effectiveness varies by FST structure
- Access Patterns: Random access amplifies decompression overhead
- Precision Requirements: Some compactors may reduce precision
§Future Enhancements
- Adaptive Compression: Automatic selection of optimal compaction strategy
- Streaming Support: Support for FSTs larger than available memory
- Lossy Compression: Options for approximate FSTs with higher compression
- Incremental Updates: Support for modifying compressed FSTs efficiently
§Available Compression Strategies
- DefaultCompactor: Enum-based storage with moderate compression
- BitPackCompactor: Bit-packing for small label/state spaces
- QuantizedCompactor: Weight quantization for lossy compression
- DeltaCompactor: Delta encoding for sequential patterns
- VarIntCompactor: Variable-length integer encoding
§See Also
- VectorFst for mutable, uncompressed FSTs
- ConstFst for read-only, optimized FSTs without compression
- CacheFst for caching expensive computations
- Memory Management Guide for memory optimization strategies
- Performance Tuning for trade-off analysis
Implementations§
Source§impl<W: Semiring, C: Compactor<W>> CompactFst<W, C>
impl<W: Semiring, C: Compactor<W>> CompactFst<W, C>
Source
pub fn new() -> Self where
    C: Default,
Create a new empty compact FST
Initializes an empty CompactFst with the default compactor strategy.
The FST will use the compactor to compress arcs and weights as they
are added to the structure.
§Examples
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Create an empty compact FST with default compression
let fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
// FST is initially empty
assert_eq!(fst.num_states(), 0);
assert!(fst.start().is_none());

§Performance
This operation is O(1) and allocates minimal memory for the initial empty state and data vectors.
Sourcepub fn with_compactor(compactor: C) -> Self
pub fn with_compactor(compactor: C) -> Self
Create a new compact FST with a specific compactor configuration
This constructor allows specification of the compactor strategy to use. Note that the compactor parameter is used only for type specification since the current Compactor trait is stateless.
§Examples
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, BitPackCompactor, QuantizedCompactor, QuantizationMode};
// Create with bit-packing compactor
let bit_packed = CompactFst::with_compactor(BitPackCompactor::<TropicalWeight>::new(8, 8, 16));
// Create with quantized compactor
let quantized = CompactFst::with_compactor(
QuantizedCompactor::<TropicalWeight>::new(
QuantizationMode::Linear { min: 0.0, max: 100.0 },
256
)
);

Source
pub fn from_fst<F: Fst<W>>(fst: &F) -> Self where
    C: Default,
Convert a VectorFst to a CompactFst with compression
Creates a new CompactFst by compressing all arcs and weights from the source FST. This is the primary way to create a compressed FST from existing data.
§Examples
use arcweight::prelude::*;
use arcweight::fst::{CompactFst, DefaultCompactor};
// Create a VectorFst
let mut vector_fst = VectorFst::<TropicalWeight>::new();
let s0 = vector_fst.add_state();
let s1 = vector_fst.add_state();
vector_fst.set_start(s0);
vector_fst.set_final(s1, TropicalWeight::one());
vector_fst.add_arc(s0, Arc::new(1, 1, TropicalWeight::new(0.5), s1));
// Convert to CompactFst
let compact_fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::from_fst(&vector_fst);
// Verify same structure
assert_eq!(compact_fst.num_states(), vector_fst.num_states());
assert_eq!(compact_fst.start(), vector_fst.start());

§Performance
- Time Complexity: O(V + E) where V = states, E = arcs
- Space Complexity: O(V + E) for compressed storage
- Compression Ratio: Depends on compactor strategy and data characteristics
Source
pub fn set_final_weight(&mut self, state: StateId, weight: Option<W>)
Helper method to set a final weight for a state
This is primarily for testing and prototype purposes since CompactFst doesn’t currently implement MutableFst. In a full implementation, final weights would be set during the compression process.
Source§impl<W: Semiring, C: Compactor<W>> CompactFst<W, C>
impl<W: Semiring, C: Compactor<W>> CompactFst<W, C>
Sourcepub fn compression_ratio(&self) -> f64
pub fn compression_ratio(&self) -> f64
Get compression ratio as a diagnostic metric
Returns the ratio of compressed size to estimated uncompressed size. Lower values indicate better compression efficiency.
Sourcepub fn force_recompress(&mut self)
pub fn force_recompress(&mut self)
Force immediate recompression
This method bypasses the adaptive triggering and immediately performs a full recompression of the FST data. Useful for optimizing before long-running read-heavy operations.
Sourcepub fn enable_adaptive_compression(&mut self, config: AdaptiveConfig)
pub fn enable_adaptive_compression(&mut self, config: AdaptiveConfig)
Enable adaptive compression with streaming support
This method configures the FST for adaptive compression that automatically selects the best compression strategy based on data characteristics and supports streaming operations for very large datasets.
§Adaptive Compression Features
- Dynamic Strategy Selection: Chooses optimal compactor based on data patterns
- Performance Monitoring: Tracks compression ratio and access patterns
- Streaming Support: Handles datasets larger than memory through chunks
- Memory Management: Automatic cache eviction and memory pressure handling
§Arguments
- config - Configuration for adaptive compression behavior
§Examples
let mut fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
let config = AdaptiveConfig {
enable_streaming: true,
memory_limit: 100_000_000, // 100MB
compression_threshold: 0.7,
analysis_window: 1000,
};
fst.enable_adaptive_compression(config);

Source
pub fn enable_streaming(&mut self, config: StreamingConfig)
Enable streaming compression for very large datasets
This method configures the FST to handle datasets that exceed available memory by processing data in chunks and using external storage when needed.
§Streaming Features
- Chunk Processing: Processes large FSTs in manageable chunks
- External Storage: Uses temporary files for intermediate results
- Memory Pressure Handling: Automatically manages memory usage
- Progress Tracking: Provides callbacks for long-running operations
§Arguments
- config - Configuration for streaming behavior
§Examples
let mut fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
let config = StreamingConfig {
chunk_size: 10000,
temp_dir: "/tmp/arcweight".to_string(),
memory_limit: Some(500_000_000), // 500MB
progress_callback: None,
};
fst.enable_streaming(config);

Source
pub fn analyze_compression_patterns(&self) -> CompressionAnalysis
Analyze data patterns and recommend optimal compression strategy
This method examines the current FST data to determine which compression strategy would be most effective, considering both compression ratio and access performance.
§Returns
A CompressionAnalysis struct containing recommendations and statistics
§Examples
let fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
let analysis = fst.analyze_compression_patterns();
println!("Recommended strategy: {:?}", analysis.recommended_strategy);
println!("Expected compression ratio: {:.2}", analysis.expected_ratio);

Source
pub fn stream_construct<I>(&mut self, input_stream: I, config: StreamingConfig)
Stream large FST construction with memory management
This method allows constructing very large FSTs by processing input data in streams, automatically managing memory pressure and using external storage when needed.
§Arguments
- input_stream - Iterator over input arcs or states
- config - Streaming configuration
§Examples
let mut fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
let config = StreamingConfig::default();
let large_input = (0..100).map(|i| {
Arc::new(i, i, TropicalWeight::one(), i + 1)
});
fst.stream_construct(large_input, config);

Source
impl<W: Semiring, C: Compactor<W>> CompactFst<W, C>
Sourcepub fn expanded_arcs(&self, state: StateId) -> Vec<Arc<W>>
pub fn expanded_arcs(&self, state: StateId) -> Vec<Arc<W>>
Get expanded arcs for a state as owned Vec
This method provides ExpandedFst-like functionality by returning an owned vector of expanded arcs. While this involves copying, it avoids the lifetime complications of maintaining cached references.
§Arguments
- state - The state ID to get arcs for
§Returns
A vector containing all expanded arcs from the specified state
§Examples
let fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
let arcs = fst.expanded_arcs(0);
for arc in &arcs {
println!("Arc: {} -> {} / {}", arc.ilabel, arc.olabel, arc.weight);
}

Source
pub fn expanded_arcs_cached(&self, state: StateId) -> Vec<Arc<W>>
Get expanded arcs with caching for performance
This method implements a simple state-level cache to avoid repeated decompression of the same state’s arcs. The cache is implemented using interior mutability patterns.
Note: This is a conceptual implementation. A full implementation would use more complex caching strategies with eviction policies.
Source
pub fn prefetch_arcs<I>(&self, states: I) where
    I: IntoIterator<Item = StateId>,
Prefetch and cache arcs for multiple states
This method proactively decompresses and caches arcs for multiple states to improve performance of subsequent accesses. Useful for algorithms that will access many states in sequence.
§Arguments
- states - Iterator of state IDs to prefetch
§Examples
let fst = CompactFst::<TropicalWeight, DefaultCompactor<TropicalWeight>>::new();
// Prefetch arcs for states 0-9
fst.prefetch_arcs(0..10);
// Subsequent accesses to these states will be faster
for state in 0..10 {
let arcs = fst.expanded_arcs(state);
// Process arcs...
}

Source
pub fn clear_arc_cache(&self)
Clear the arc expansion cache
This method clears any cached expanded arcs to free memory. Useful for memory management in long-running applications.
Sourcepub fn cache_stats(&self) -> CacheStats
pub fn cache_stats(&self) -> CacheStats
Get cache statistics for monitoring and optimization
Returns information about cache performance including hit rates, memory usage, and eviction statistics.
Sourcepub fn set_prefetching(&mut self, _enabled: bool)
pub fn set_prefetching(&mut self, _enabled: bool)
Enable or disable smart prefetching based on access patterns
When enabled, the FST will analyze access patterns and proactively decompress arcs for states that are likely to be accessed soon.
Sourcepub fn supports_efficient_expansion(&self) -> bool
pub fn supports_efficient_expansion(&self) -> bool
Check if ExpandedFst functionality is efficiently supported
Returns true if this CompactFst instance can efficiently provide ExpandedFst operations, or false if operations will be slow due to compression overhead.
Trait Implementations§
Source§impl<W: Clone + Semiring, C: Clone + Compactor<W>> Clone for CompactFst<W, C>
impl<W: Clone + Semiring, C: Clone + Compactor<W>> Clone for CompactFst<W, C>
Source§fn clone(&self) -> CompactFst<W, C>
fn clone(&self) -> CompactFst<W, C>
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
source. Read more

Source§impl<W: Semiring, C: Compactor<W>> ExpandedFst<W> for CompactFst<W, C>
Implementation of ExpandedFst for CompactFst with on-demand decompression
impl<W: Semiring, C: Compactor<W>> ExpandedFst<W> for CompactFst<W, C>
Implementation of ExpandedFst for CompactFst with on-demand decompression
This implementation provides direct access to arc slices while maintaining compression benefits through intelligent caching and decompression strategies. The FST transparently decompresses arcs when slice access is requested, caching results for subsequent accesses.
§On-Demand Decompression Strategy
- Lazy Expansion: Arcs are decompressed only when arcs_slice() is called
- State-Level Caching: Each state maintains a cache of its expanded arcs
- Memory Management: Caches are evicted based on usage patterns and memory pressure
- Prefetching: Related states may be pre-expanded based on access patterns
§Performance Characteristics
- First Access: O(k) where k is the number of arcs (decompression cost)
- Cached Access: O(1) direct slice access
- Memory Usage: Compressed size + cache for accessed states
- Cache Performance: Excellent for repeated traversals, good for algorithms requiring arc slices
§Use Cases
- Algorithms requiring direct arc array access (sort, search, vectorized operations)
- Frequent traversal of the same states
- Performance-critical code that benefits from arc slice optimization
- Compatibility with existing ExpandedFst-based algorithms
Source§fn arcs_slice(&self, _state: StateId) -> &[Arc<W>]
fn arcs_slice(&self, _state: StateId) -> &[Arc<W>]
Source§impl<W: Semiring, C: Compactor<W>> Fst<W> for CompactFst<W, C>
impl<W: Semiring, C: Compactor<W>> Fst<W> for CompactFst<W, C>
Source§fn num_states(&self) -> usize
fn num_states(&self) -> usize
Source§fn properties(&self) -> FstProperties
fn properties(&self) -> FstProperties
Source§fn num_arcs_total(&self) -> usize
fn num_arcs_total(&self) -> usize
Source§impl<W: Semiring, C: Compactor<W>> MutableFst<W> for CompactFst<W, C>
Implementation of MutableFst for CompactFst with dynamic recompression
impl<W: Semiring, C: Compactor<W>> MutableFst<W> for CompactFst<W, C>
Implementation of MutableFst for CompactFst with dynamic recompression
This implementation allows CompactFst to be modified while maintaining compression benefits. When modifications are made, the FST intelligently recompresses data to maintain optimal space usage.
§Dynamic Recompression Strategy
- Lazy Recompression: Modifications are batched and compressed periodically
- Adaptive Triggers: Recompression occurs when efficiency drops below threshold
- Incremental Updates: Small changes are applied without full recompression
- Smart Caching: Frequently accessed data is kept uncompressed temporarily
§Performance Characteristics
- Add Operations: O(1) amortized with batching, O(n) worst case during recompression
- Memory Usage: May temporarily increase during modification, returns to compressed size
- Recompression Cost: Proportional to modified data size, not entire FST
Source§fn delete_arcs(&mut self, state: StateId)
fn delete_arcs(&mut self, state: StateId)
Source§fn delete_arc(&mut self, state: StateId, arc_idx: usize)
fn delete_arc(&mut self, state: StateId, arc_idx: usize)
Source§fn reserve_states(&mut self, n: usize)
fn reserve_states(&mut self, n: usize)
Source§fn reserve_arcs(&mut self, _state: StateId, n: usize)
fn reserve_arcs(&mut self, _state: StateId, n: usize)
Source§fn remove_final(&mut self, state: StateId)
fn remove_final(&mut self, state: StateId)
Auto Trait Implementations§
impl<W, C> Freeze for CompactFst<W, C>where
C: Freeze,
impl<W, C> RefUnwindSafe for CompactFst<W, C>
impl<W, C> Send for CompactFst<W, C>
impl<W, C> Sync for CompactFst<W, C>
impl<W, C> Unpin for CompactFst<W, C>
impl<W, C> UnwindSafe for CompactFst<W, C>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more

Source
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more