fastembed 0.5.0

A Rust implementation of https://github.com/qdrant/fastembed
Documentation

FastEmbed - Fast, light, accurate library built for retrieval embedding generation.

The library provides the FlagEmbedding struct to interface with the Flag embedding models.

Instantiating FlagEmbedding

use fastembed::{FlagEmbedding, InitOptions, EmbeddingModel, EmbeddingBase};

# fn model_demo() -> anyhow::Result<()> {
// With default InitOptions
let model: FlagEmbedding = FlagEmbedding::try_new(Default::default())?;

// List all supported models
dbg!(FlagEmbedding::list_supported_models());

// With custom InitOptions
let model: FlagEmbedding = FlagEmbedding::try_new(InitOptions {
model_name: EmbeddingModel::BGEBaseEN,
show_download_message: false,
..Default::default()
})?;
# Ok(())
# }

Find more info about the available options in the InitOptions documentation.

Embeddings generation

# use fastembed::{FlagEmbedding, InitOptions, EmbeddingModel, EmbeddingBase};
# fn embedding_demo() -> anyhow::Result<()> {
# let model: FlagEmbedding = FlagEmbedding::try_new(Default::default())?;
let documents = vec![
"passage: Hello, World!",
"query: Hello, World!",
"passage: This is an example passage.",
// You can leave out the prefix but it's recommended
"fastembed-rs is licensed under MIT"
];

// Generate embeddings with the default batch size, 256
let embeddings = model.embed(documents, None)?;

println!("Embeddings length: {}", embeddings.len()); // -> Embeddings length: 4
# Ok(())
# }

Generate query and passage embeddings

# use fastembed::{FlagEmbedding, InitOptions, EmbeddingModel, EmbeddingBase};
# fn query_passage_demo() -> anyhow::Result<()> {
# let model: FlagEmbedding = FlagEmbedding::try_new(Default::default())?;
let passages = vec![
"This is the first passage. It provides more context for retrieval.",
"Here's the second passage, which is longer than the first one. It includes additional information.",
"And this is the third passage, the longest of all. It contains several sentences and is meant for more extensive testing."
];

// Generate embeddings for the passages
// The texts are prefixed with "passage" for better results
// The batch size is set to 1 for demonstration purposes
let embeddings = model.passage_embed(passages, Some(1))?;

println!("Passage embeddings length: {}", embeddings.len()); // -> Passage embeddings length: 3

let query = "What is the answer to this generic question?";

// Generate embeddings for the query
// The text is prefixed with "query" for better retrieval
let query_embedding = model.query_embed(query)?;

println!("Query embedding dimension: {}", query_embedding.len()); // -> Query embedding dimension: 768
# Ok(())
# }