kproc_llm/
error.rs

use std::sync::PoisonError;

/// Error enum for kproc
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum Error
{
8  #[error("SendError in streams communication, most likely end of stream: {0}.")]
9  SendError(String),
10  #[error("IO Error: {0}.")]
11  IOError(#[from] std::io::Error),
12  #[error("Deserialization error: {0}")]
13  DeserializationError(String),
14  #[cfg(feature = "ollama")]
15  #[error("Ollama error: {0}")]
16  OllamaError(#[from] ollama_rs::error::OllamaError),
17  #[error("Poison (Mutex/RwLock) error: {0}")]
18  PoisonError(String),
19  #[error("{0}")]
20  FuturesMpscSendError(#[from] futures::channel::mpsc::SendError),
21  #[cfg(feature = "image")]
22  #[error("{0}")]
23  ValueError(#[from] kproc_values::Error),
24  #[cfg(feature = "llama.cpp")]
25  #[error("{0}")]
26  HfApiError(#[from] hf_hub::api::sync::ApiError),
27  #[error("Invalid HugginFace uri.")]
28  HfInvalidUri,
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaCpp(#[from] llama_cpp_2::LLamaCppError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaModelLoad(#[from] llama_cpp_2::LlamaModelLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaContextLoad(#[from] llama_cpp_2::LlamaContextLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaStringToToken(#[from] llama_cpp_2::StringToTokenError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaBatchAddError(#[from] llama_cpp_2::llama_batch::BatchAddError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaDecodeError(#[from] llama_cpp_2::DecodeError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaTokenToString(#[from] llama_cpp_2::TokenToStringError),
  #[cfg(feature = "template")]
  #[error("{0}")]
  MinijinjaError(#[from] minijinja::Error),
}

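// `PoisonError<T>` is generic over the guarded data, so it cannot be stored in a
// non-generic variant via `#[from]`; convert it to its string representation instead.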
55impl<T> From<PoisonError<T>> for Error
56{
57  fn from(value: PoisonError<T>) -> Self
58  {
59    Error::PoisonError(value.to_string())
60  }
61}
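
// Usage sketch (hypothetical, not part of the crate's API): the `#[from]` conversions
// above and the manual `From<PoisonError<T>>` impl let callers bubble errors up with
// `?`. The function and parameter names below are illustrative only.
//
//   fn read_config(path: &std::path::Path, cache: &std::sync::Mutex<String>) -> Result<String, Error>
//   {
//     let text = std::fs::read_to_string(path)?; // std::io::Error -> Error::IOError
//     let mut guard = cache.lock()?;             // PoisonError<_> -> Error::PoisonError
//     *guard = text.clone();
//     Ok(text)
//   }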