use std::sync::PoisonError;
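
/// Error type aggregating failures from stream communication, IO, deserialization,
/// lock poisoning, and the optional `ollama`, `image`, `llama.cpp`, and `template` backends.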
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum Error
{
#[error("SendError in streams communication, most likely end of stream: {0}.")]
SendError(String),
#[error("IO Error: {0}.")]
IOError(#[from] std::io::Error),
#[error("Deserialization error: {0}")]
DeserializationError(String),
#[cfg(feature = "ollama")]
#[error("Ollama error: {0}")]
OllamaError(#[from] ollama_rs::error::OllamaError),
#[error("Poison (Mutex/RwLock) error: {0}")]
PoisonError(String),
#[error("{0}")]
FuturesMpscSendError(#[from] futures::channel::mpsc::SendError),
#[cfg(feature = "image")]
#[error("{0}")]
ValueError(#[from] kproc_values::Error),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
HfApiError(#[from] hf_hub::api::sync::ApiError),
#[error("Invalid HugginFace uri.")]
HfInvalidUri,
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaCpp(#[from] llama_cpp_2::LLamaCppError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaModelLoad(#[from] llama_cpp_2::LlamaModelLoadError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaContextLoad(#[from] llama_cpp_2::LlamaContextLoadError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaStringToToken(#[from] llama_cpp_2::StringToTokenError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaBatchAddError(#[from] llama_cpp_2::llama_batch::BatchAddError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaDecodeError(#[from] llama_cpp_2::DecodeError),
#[cfg(feature = "llama.cpp")]
#[error("{0}")]
LlamaTokenToString(#[from] llama_cpp_2::TokenToStringError),
#[cfg(feature = "template")]
#[error("{0}")]
MinijinjaError(#[from] minijinja::Error),
}
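
// A minimal sketch (not part of the original module) of how the `#[from]`
// conversions are meant to be used: `?` turns a source error that has a
// `#[from]` variant, here `std::io::Error`, into this `Error` automatically.
// The helper function and path below are hypothetical and exist only for illustration.
#[cfg(test)]
mod io_conversion_test
{
  use super::Error;

  fn read_file(path: &str) -> Result<String, Error>
  {
    // `std::fs::read_to_string` fails with `std::io::Error`; `?` maps it to
    // `Error::IOError` through the `From` impl generated by `#[from]`.
    Ok(std::fs::read_to_string(path)?)
  }

  #[test]
  fn io_error_converts_via_question_mark()
  {
    let result = read_file("/this/path/should/not/exist");
    assert!(matches!(result, Err(Error::IOError(_))));
  }
}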
impl<T> From<PoisonError<T>> for Error
{
  fn from(value: PoisonError<T>) -> Self
  {
    Error::PoisonError(value.to_string())
  }
}
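
// A minimal sketch (not part of the original module) showing why the manual
// `From<PoisonError<T>>` impl above exists: `PoisonError<T>` is generic over the
// guard type, so thiserror's `#[from]` cannot derive the conversion, and storing
// the message as a `String` keeps `Error` independent of `T`.
// The counter and thread below are hypothetical and exist only for illustration.
#[cfg(test)]
mod poison_conversion_test
{
  use super::Error;
  use std::sync::{ Arc, Mutex };

  fn read_counter(counter: &Mutex<u32>) -> Result<u32, Error>
  {
    // `lock()` returns `Err(PoisonError<_>)` when another thread panicked while
    // holding the lock; `?` converts it into `Error::PoisonError` via the impl above.
    Ok(*counter.lock()?)
  }

  #[test]
  fn poisoned_lock_converts_to_error()
  {
    let counter = Arc::new(Mutex::new(0u32));
    let clone = Arc::clone(&counter);
    // Panic while holding the lock so the mutex becomes poisoned.
    let _ = std::thread::spawn(move ||
    {
      let _guard = clone.lock().unwrap();
      panic!("poison the mutex");
    })
    .join();
    assert!(matches!(read_counter(&counter), Err(Error::PoisonError(_))));
  }
}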