embeddenator_vsa/lib.rs

//! Embeddenator VSA - Vector Symbolic Architecture Core
//!
//! This crate provides the foundational Vector Symbolic Architecture (VSA)
//! operations for sparse ternary representations used in Embeddenator.
//!
//! # Overview
//!
//! VSA is a computational framework for representing and manipulating
//! high-dimensional vectors through algebraic operations:
//!
//! - **Bundle (⊕)**: Superposition of multiple vectors
//! - **Bind (⊙)**: Compositional binding of associations
//! - **Similarity**: Cosine similarity for pattern retrieval
//!
//! # Core Types
//!
//! - [`SparseVec`]: Sparse ternary vector representation
//! - [`PackedTritVec`]: Memory-efficient packed trit storage
//! - [`Codebook`]: Mapping between symbols and vectors
//!
//! # Example
//!
//! ```rust
//! use embeddenator_vsa::SparseVec;
//!
//! // Create random sparse vectors
//! let a = SparseVec::random();
//! let b = SparseVec::random();
//!
//! // Bundle operation (superposition)
//! let bundled = a.bundle(&b);
//!
//! // Compute similarity
//! let similarity = a.cosine(&b);
//! assert!(similarity >= -1.0 && similarity <= 1.0);
//! ```
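//!
//! Bundling is intended to preserve similarity to each of its operands, which
//! is what makes superposition useful for pattern retrieval. A minimal sketch
//! of that property, reusing the calls above (it assumes `bundle` returns
//! another [`SparseVec`]; the exact values are not asserted because they
//! depend on the random draws):
//!
//! ```rust
//! use embeddenator_vsa::SparseVec;
//!
//! let a = SparseVec::random();
//! let b = SparseVec::random();
//! let bundled = a.bundle(&b);
//!
//! // Similarity of the superposition to each operand. Two independent random
//! // vectors are expected to be near-orthogonal, so these values are usually
//! // noticeably higher than `a.cosine(&b)`.
//! let sim_to_a = bundled.cosine(&a);
//! let sim_to_b = bundled.cosine(&b);
//! println!("bundled vs a: {sim_to_a}, bundled vs b: {sim_to_b}");
//! ```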

use std::fmt;

/// Unified error type for VSA operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum VsaError {
    /// Value is outside the valid range for the operation
    ValueOutOfRange { value: i64, min: i64, max: i64 },
    /// Hash operation produced unexpected length
    InvalidHashLength { expected: usize, actual: usize },
    /// Operation expected a non-empty collection
    EmptyCollection,
    /// Packed data could not be unpacked
    InvalidPackedData,
    /// Value cannot be converted to the target type
    InvalidValue(String),
}

impl fmt::Display for VsaError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            VsaError::ValueOutOfRange { value, min, max } => {
                write!(
                    f,
                    "Value {} is outside valid range [{}, {}]",
                    value, min, max
                )
            }
            VsaError::InvalidHashLength { expected, actual } => {
                write!(
                    f,
                    "Invalid hash length: expected {}, got {}",
                    expected, actual
                )
            }
            VsaError::EmptyCollection => {
                write!(f, "Operation requires non-empty collection")
            }
            VsaError::InvalidPackedData => {
                write!(f, "Packed data could not be unpacked")
            }
            VsaError::InvalidValue(msg) => {
                write!(f, "Invalid value: {}", msg)
            }
        }
    }
}

impl std::error::Error for VsaError {}
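
// A minimal sketch of a unit test exercising the `Display` formatting defined
// above; the module and test names here are illustrative additions, not part
// of the existing crate.
#[cfg(test)]
mod vsa_error_display_tests {
    use super::VsaError;

    #[test]
    fn display_matches_documented_formats() {
        // Range errors report the offending value and the accepted bounds.
        let err = VsaError::ValueOutOfRange { value: 5, min: -1, max: 1 };
        assert_eq!(err.to_string(), "Value 5 is outside valid range [-1, 1]");

        // Hash-length errors report expected vs. actual lengths.
        let err = VsaError::InvalidHashLength { expected: 32, actual: 16 };
        assert_eq!(err.to_string(), "Invalid hash length: expected 32, got 16");

        // Unit-like and payload-carrying variants render their fixed messages.
        assert_eq!(
            VsaError::EmptyCollection.to_string(),
            "Operation requires non-empty collection"
        );
        assert_eq!(
            VsaError::InvalidValue("bad trit".to_string()).to_string(),
            "Invalid value: bad trit"
        );
    }
}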

#[cfg(feature = "block-sparse")]
pub mod block_sparse;
pub mod codebook;
pub mod coherency;
pub mod dimensional;
#[cfg(feature = "cuda")]
pub mod gpu;
#[cfg(feature = "cuda")]
pub mod gpu_encoding;
#[cfg(feature = "cuda")]
pub mod gpu_kernels;
pub mod phase_training;
pub mod resonator;
pub mod reversible_encoding;
pub mod simd_cosine;
pub mod ternary;
pub mod ternary_vec;
pub mod virtual_memory;
pub mod vram_pool;
pub mod vsa;

// Re-export main types
pub use codebook::{
    BalancedTernaryWord, Codebook, CodebookTrainingConfig, ProjectionConfig, ProjectionResult,
    SemanticOutlier, WordMetadata,
};
pub use dimensional::{
    DifferentialEncoder, DifferentialEncoding, DimensionalConfig, HyperVec, Trit as DimTrit,
    TritDepthConfig, Tryte,
};
#[cfg(feature = "cuda")]
pub use gpu::{
    GpuArch, GpuBackend, GpuConfig, GpuError, GpuMemoryConfig, DEFAULT_MEMORY_HEADROOM_PERCENT,
    MAX_MEMORY_USAGE_RATIO, MIN_SYSTEM_RESERVE_BYTES,
};
#[cfg(feature = "cuda")]
pub use gpu_encoding::GpuAcceleratedEncoder;
#[cfg(feature = "cuda")]
pub use gpu_kernels::GpuKernelRunner;
pub use phase_training::{
    train_codebook_with_phases, PhaseTrainer, PhaseTrainingConfig, PhaseTrainingStats,
    TrainingPhase,
};
pub use reversible_encoding::{ReversibleVSAEncoder, MAX_POSITIONS};
#[cfg(all(target_arch = "x86_64", target_feature = "avx2"))]
pub use simd_cosine::cosine_avx2;
#[cfg(all(target_arch = "x86_64", target_feature = "avx2"))]
pub use simd_cosine::cosine_avx_vnni;
#[cfg(target_arch = "aarch64")]
pub use simd_cosine::cosine_neon;
pub use simd_cosine::cosine_scalar;
// SIMD dispatch function that automatically selects the best available implementation
pub use simd_cosine::cosine_simd;
pub use ternary::{CorrectionEntry, ParityTrit, Trit, Tryte3, Word6};
pub use ternary_vec::PackedTritVec;
pub use vsa::{ReversibleVSAConfig, SparseVec, SparsityScaling, VsaConfig, VsaConfigSchema, DIM};

#[cfg(feature = "block-sparse")]
pub use block_sparse::{Block, BlockSparseError, BlockSparseTritVec, BLOCK_SIZE};

// Coherency types for host/device memory management
pub use coherency::{CoherencyManager, CoherencyState, CoherencyStats, CoherentEngram};

// Multi-tier coherency protocol (#48)
pub use coherency::{
    SyncProtocol, Tier, TierMask, TieredBlock, TieredCoherencyStats, TieredState, WritePolicy,
};

// VRAM pool types for GPU memory management
// Note: Non-cuda builds have stub implementations that return errors
pub use vram_pool::{VramHandle, VramPool, VramPoolConfig, VramPoolStats};

// Virtual memory abstraction for tiered storage (VRAM → Host RAM → Disk)
pub use virtual_memory::{
    MemoryTier, VMemHandle, VirtualMemory, VirtualMemoryConfig, VirtualMemoryError,
    VirtualMemoryStats,
};

// Resonator network types for learned codebooks and semantic inference
pub use resonator::{
    FactorizationResult, RecoveredFactor, Resonator, ResonatorConfig, ResonatorStats,
    SemanticInference, TrainingExample, TrainingResult,
};