Expand description
§Forgetless
Context optimization for LLMs. Takes massive context and outputs an optimized version that fits your token budget.
§Quick Start
ⓘ (illustrative example — not compiled as a doctest)
use forgetless::Forgetless;
let result = Forgetless::new(128_000)
.add("system prompt + conversation + everything...")
.add_file("document.pdf")
.add_files(&["code.rs", "data.json"])
.run()
.await?;
// Send to your LLM
let response = your_llm.chat(&result.content).await?;
Re-exports§
pub use core::config::Config;
pub use core::config::ForgetlessConfig;
pub use core::config::ScoringConfig;
pub use core::error::Error;
pub use core::error::Result;
pub use core::types::OptimizationStats;
pub use core::types::OptimizedContext;
pub use core::types::PolishedContext;
pub use core::types::ScoredChunk;
pub use core::types::ScoreBreakdown;
pub use builder::Forgetless;
pub use input::content::ContentInput;
pub use input::content::FileWithPriority;
pub use input::content::IntoContent;
pub use input::content::IntoFileContent;
pub use input::content::WithPriority;
pub use input::file::read_file_content;
pub use processing::chunking::Chunk;
pub use processing::chunking::ChunkConfig;
pub use processing::chunking::Chunker;
pub use processing::chunking::ContentType;
pub use processing::scoring::Priority;
pub use processing::token::TokenCounter;
pub use processing::token::TokenizerModel;
pub use ai::embeddings::cosine_similarity;
pub use ai::embeddings::embed_batch;
pub use ai::embeddings::embed_text;
pub use ai::embeddings::EmbeddingCache;
pub use ai::llm::LLMConfig;
pub use ai::llm::Quantization;
pub use ai::llm::LLM;
pub use ai::vision::describe_image;
pub use ai::vision::describe_image_with_prompt;
pub use ai::vision::init_vision;
pub use ai::vision::is_vision_ready;
Modules§
- ai
- AI modules - embeddings, LLM, and vision
- builder
- Forgetless builder and optimization pipeline
- core
- Core types, configuration, and error handling
- input
- Input handling - content types and file parsing
- processing
- Content processing - chunking, scoring, and tokenization
Constants§
- VERSION
- Library version