// --- Module declarations ---
// Optional subsystems are gated behind Cargo features (`client`, `server`,
// `huggingface`, `onnx`, `distributed`, `rag`, `rag-sqlite`); everything else
// always compiles.

/// Compute backends.
pub mod backend;
/// Client-side API (optional).
#[cfg(feature = "client")]
pub mod client;
/// Crate configuration types.
pub mod config;
/// Chat/inference engine.
pub mod engine;
/// Batched inference engine; paired with the server feature.
#[cfg(feature = "server")]
pub mod engine_batched;
/// GGUF model file reading/writing and quantization.
pub mod gguf;
/// Hugging Face Hub client (optional).
#[cfg(feature = "huggingface")]
pub mod huggingface;
/// Model definitions, loading, KV caches, LoRA, MoE, etc.
pub mod model;
/// ONNX model loading (optional).
#[cfg(feature = "onnx")]
pub mod onnx;
/// Sharded/pipelined execution across nodes (optional).
#[cfg(feature = "distributed")]
pub mod distributed;
// NOTE(review): `rag` is declared unconditionally, yet every `rag` re-export
// further down is gated on `rag`/`rag-sqlite`. Presumably the module itself
// compiles without those features (internal cfg gating) — confirm, or gate
// this declaration with `#[cfg(any(feature = "rag", feature = "rag-sqlite"))]`
// to match.
pub mod rag;
/// Token sampling strategies and grammar-constrained sampling.
pub mod sampling;
/// HTTP server (optional).
#[cfg(feature = "server")]
pub mod server;
/// Tensor types and storage.
pub mod tensor;
/// Tokenization.
pub mod tokenizer;
// --- Crate-root re-exports ---
// Flatten the most commonly used types to the crate root so downstream code
// can name them without the intermediate module path.
pub use config::{Config, ConfigError};
pub use engine::{ChatEngine, ChatTemplate, Engine, EngineConfig, EngineError};
pub use backend::{default_backend, Backend, BackendError};
pub use backend::tensor_parallel::{
    ShardingPlan, SingleDeviceTP, TPConfig, TensorParallel, merge_shards, shard_weight,
};
pub use gguf::{
    GgufBuilder, GgufData, GgufFile, GgufReader, GgufWriter, TensorToWrite,
    QuantizeOptions, QuantizeStats, quantize_model,
};
pub use model::{
    Architecture, InferenceContext, KVCache, LlamaModel, Model, ModelConfig, ModelError,
    ModelLoader, load_llama_model,
    AttentionLayer, DeltaNetConfig, DeltaNetLayer, DeltaNetState, RecurrentState,
    LoraAdapter, LoraAdapters, LoraConfig,
    MoeConfig, MoeExpert, MoeLayer, MoeRouter, MoeStats,
    SpeculativeConfig, SpeculativeDecoder, SpeculativeMode, SpeculativeStats,
    EmbeddingConfig, EmbeddingError, EmbeddingExtractor, PoolingStrategy, TruncationStrategy,
    cosine_similarity, dot_product, euclidean_distance, find_nearest,
    CachedPrefix, PrefixId, PrefixSharing, PromptCache, PromptCacheConfig, PromptCacheStats,
    KVCacheFormat, QuantizedKVCache,
    BlockId, BlockTable, PageAllocator, PagedKVPool, PagedSequence, DEFAULT_BLOCK_SIZE,
};
pub use sampling::{
    Grammar, GrammarSampler, GbnfGrammar, JsonGrammar, RegexGrammar,
    MirostatConfig, Sampler, SamplerConfig,
};
pub use tensor::{DType, Tensor, TensorError, TensorStorage};
pub use tokenizer::{Tokenizer, TokenizerError};

// Feature-gated re-exports mirror the gates on the module declarations above.
#[cfg(feature = "huggingface")]
pub use huggingface::{HfClient, HfError, HfFileInfo, format_bytes};
#[cfg(feature = "onnx")]
pub use onnx::{HfConfig, OnnxError, OnnxFile, OnnxMetadata, OnnxModelLoader, OnnxTensorInfo};
#[cfg(feature = "rag")]
pub use rag::{
    RagConfig, RagStore, RagError, RagResult, Document, NewDocument, RagContextBuilder, TextChunker,
    IndexType, SearchType, DistanceMetric, DatabaseConfig, EmbeddingsConfig, SearchConfig,
    KnowledgeBase, KnowledgeBaseBuilder, KnowledgeBaseConfig, DataSource, ChunkingStrategy,
    RetrievalConfig, RetrievalResponse, RetrieveAndGenerateResponse, RetrievedChunk,
    Citation, SourceLocation, IngestionResult,
    EmbeddingGenerator,
    MetadataFilter,
};
#[cfg(feature = "rag-sqlite")]
pub use rag::{
    SqliteStore, SqliteConfig, SqliteDocument, SqliteNewDocument, SqliteMetadataFilter,
    SqliteDistanceMetric,
};
// `RagError`/`RagResult` are already re-exported in the `rag`-gated block
// above; exporting them again when both features are on would redefine the
// names at the crate root. The `not(feature = "rag")` guard ensures exactly
// one of the two `pub use` statements is active.
#[cfg(all(feature = "rag-sqlite", not(feature = "rag")))]
pub use rag::{RagError, RagResult};
#[cfg(feature = "server")]
pub use engine_batched::{
    BatchFinishReason, BatchRequest, BatchToken, BatchedEngine, BatchedEngineConfig,
};
#[cfg(feature = "distributed")]
pub use distributed::{
    ClusterConfig, Coordinator, DistributedError, DistributedModel, DistributedResult,
    PipelineExecutor, ShardServer, ShardSpec,
};
/// Top-level error type aggregating failures from the core
/// (non-feature-gated) subsystems. Each variant wraps the originating
/// error via `#[from]`, so `?` converts them into `Error` automatically.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// Underlying I/O failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),
    /// Error from the `gguf` module (GGUF file handling).
    #[error("GGUF error: {0}")]
    Gguf(#[from] gguf::GgufError),
    /// Error from the `tensor` module.
    #[error("Tensor error: {0}")]
    Tensor(#[from] tensor::TensorError),
    /// Error from the `backend` module.
    #[error("Backend error: {0}")]
    Backend(#[from] backend::BackendError),
}

/// Crate-wide convenience alias: `Result<T>` defaults the error type to
/// [`Error`].
pub type Result<T> = std::result::Result<T, Error>;