use kalosm::language::*;
use surrealdb::{engine::local::SurrealKv, Surreal};
/// RAG chat example: index two documentation pages into a SurrealDB-backed
/// document table on first run, then answer questions in a REPL loop by
/// prepending the top search hit to each user prompt.
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Check for the database directory *before* opening the engine, which
    // creates it — this is how we detect a first run.
    let first_run = !std::path::Path::new("./db").exists();

    // Open the embedded key-value store and select a namespace/database.
    let db = Surreal::new::<SurrealKv>("./db/temp.db").await?;
    db.use_ns("test").use_db("test").await?;

    // Document table with semantic chunking; embeddings persisted separately.
    let document_table = db
        .document_table_builder("documents")
        .with_chunker(SemanticChunker::new())
        .at("./db/embeddings.db")
        .build::<Document>()
        .await?;

    // Only ingest the source pages once, on the very first launch.
    if first_run {
        // NOTE(review): this directory is not referenced again in this file —
        // presumably used by the table internals; confirm before removing.
        std::fs::create_dir_all("documents")?;
        let urls = [
            "https://floneum.com/kalosm/docs",
            "https://floneum.com/kalosm/docs/guides/retrieval_augmented_generation",
        ];
        // The URLs are compile-time constants, so parsing cannot fail.
        document_table
            .add_context(urls.iter().map(|url| Url::parse(url).unwrap()))
            .await?;
    }

    // Load the chat model and prime it to trust user-supplied context.
    let model = Llama::new_chat().await?;
    let mut chat = model.chat().with_system_prompt("The assistant help answer questions based on the context given by the user. The model knows that the information the user gives it is always true.");

    // Interactive loop: retrieve the best-matching document, splice it into
    // the prompt, and stream the model's answer to stdout.
    loop {
        let user_question = prompt_input("\n> ")?;

        let hits = document_table
            .search(&user_question)
            .with_results(1)
            .await?;

        let mut sections = Vec::new();
        for document in hits {
            sections.push(format!(
                "Title: {}\nBody: {}\n",
                document.record.title(),
                document.record.body()
            ));
        }
        let context = sections.join("\n");

        let prompt = format!("{context}\n{user_question}");
        println!("{}", prompt);

        let mut output_stream = chat(&prompt);
        print!("Bot: ");
        output_stream.to_std_out().await?;
    }
}