kproc_llm/
error.rs

use std::sync::PoisonError;

/// Error enum for kproc_llm.
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
#[non_exhaustive]
pub enum Error
{
  #[cfg(feature = "simple-api")]
  #[error("Serialization error: {0}")]
  JsonSerializationError(#[from] serde_json::Error),
  #[error("Send error in stream communication, most likely end of stream: {0}.")]
  SendError(String),
  #[error("IO Error: {0}.")]
  IOError(#[from] std::io::Error),
  #[error("Deserialization error: {0}")]
  DeserializationError(String),
  #[cfg(feature = "ollama")]
  #[error("Ollama error: {0}")]
  OllamaError(#[from] ollama_rs::error::OllamaError),
  #[error("Poison (Mutex/RwLock) error: {0}")]
  PoisonError(String),
  #[error("{0}")]
  FuturesMpscSendError(#[from] futures::channel::mpsc::SendError),
  #[cfg(feature = "image")]
  #[error("{0}")]
  ValueError(#[from] kproc_values::Error),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  HfApiError(#[from] hf_hub::api::sync::ApiError),
  #[cfg(feature = "candle")]
  #[error("{0}")]
  HfTokioApiError(#[from] hf_hub::api::tokio::ApiError),
  #[error("Invalid Hugging Face URI.")]
  HfInvalidUri,
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaCpp(#[from] llama_cpp_2::LLamaCppError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaModelLoad(#[from] llama_cpp_2::LlamaModelLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaContextLoad(#[from] llama_cpp_2::LlamaContextLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaStringToToken(#[from] llama_cpp_2::StringToTokenError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaBatchAddError(#[from] llama_cpp_2::llama_batch::BatchAddError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaDecodeError(#[from] llama_cpp_2::DecodeError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaTokenToString(#[from] llama_cpp_2::TokenToStringError),
  #[cfg(feature = "template")]
  #[error("{0}")]
  MinijinjaError(#[from] minijinja::Error),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  CCUtilsServerError(#[from] ccutils::servers::ServerError),
  #[cfg(feature = "llama.cpp")]
  #[error("Invalid JSON Grammar")]
  InvalidJsonGrammar,
  #[cfg(feature = "simple-api")]
  #[error("HTTP Request error: {0}")]
  HttpError(String),
  #[cfg(feature = "simple-api")]
  #[error("Error in API usage: code: {code}, message: {message}, type: {error_type}")]
  SimpleApiError
  {
    code: u32,
    message: String,
    error_type: String,
  },
  #[cfg(feature = "candle")]
  #[error("Error in candle: {0}")]
  CandleError(#[from] candle_core::Error),
  #[error("Model was not properly defined.")]
  UndefinedModel,
  #[error("Unknown end-of-stream token '{0}'.")]
  UnknownEndOfStream(String),
  #[error("Other error: {0}")]
  Other(String),
}

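// `PoisonError<T>` is generic over the guarded type, so thiserror's
// `#[from]` cannot be used for it without making `Error` itself generic;
// instead, only the error message is kept.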
impl<T> From<PoisonError<T>> for Error
{
  fn from(value: PoisonError<T>) -> Self
  {
    Error::PoisonError(value.to_string())
  }
}

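// Catch-all conversion for boxed trait-object errors; the concrete error
// is flattened to its message and stored in `Error::Other`.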
impl From<Box<dyn std::error::Error + Send + Sync>> for Error
{
  fn from(err: Box<dyn std::error::Error + Send + Sync>) -> Self
  {
    Error::Other(err.to_string())
  }
}
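
// A minimal sketch of how these conversions compose with `?`; the helper
// function and paths below are illustrative only, not part of the crate API.
#[cfg(test)]
mod tests
{
  use super::Error;

  // `#[from] std::io::Error` lets `?` lift IO failures into `Error`.
  fn read_file(path: &str) -> Result<String, Error>
  {
    Ok(std::fs::read_to_string(path)?)
  }

  #[test]
  fn io_error_converts()
  {
    let err = read_file("/this/path/does/not/exist").unwrap_err();
    assert!(matches!(err, Error::IOError(_)));
  }

  // The manual `From<PoisonError<T>>` impl makes poisoned locks usable
  // with `?` and `.into()` despite the generic guard type.
  #[test]
  fn poison_error_converts()
  {
    use std::sync::{Arc, Mutex};
    let lock = Arc::new(Mutex::new(0u32));
    // Poison the mutex by panicking while the guard is held.
    let clone = Arc::clone(&lock);
    let _ = std::thread::spawn(move || {
      let _guard = clone.lock().unwrap();
      panic!("poison the lock");
    })
    .join();
    let err: Error = lock.lock().unwrap_err().into();
    assert!(matches!(err, Error::PoisonError(_)));
  }
}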