
kproc_llm/error.rs

use std::sync::PoisonError;

#[cfg(feature = "candle-git")]
use candle_git_core as candle_core;

/// Error enum for kproc
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
#[non_exhaustive]
pub enum Error
{
  #[cfg(feature = "simple-api")]
  #[error("Serialization error: {0}")]
  JsonSerializationError(#[from] serde_json::Error),
  #[error("SendError in stream communication, most likely the end of the stream: {0}.")]
  SendError(String),
  #[error("IO Error: {0}.")]
  IOError(#[from] std::io::Error),
  #[error("Deserialization error: {0}")]
  DeserializationError(String),
  #[cfg(feature = "ollama")]
  #[error("Ollama error: {0}")]
  OllamaError(#[from] ollama_rs::error::OllamaError),
  #[error("Poison (Mutex/RwLock) error: {0}")]
  PoisonError(String),
  #[error("{0}")]
  FuturesMpscSendError(#[from] futures::channel::mpsc::SendError),
  #[cfg(feature = "image")]
  #[error("{0}")]
  ValueError(#[from] kproc_values::Error),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  HfApiError(#[from] hf_hub::api::sync::ApiError),
  #[cfg(any(feature = "candle", feature = "candle-git"))]
  #[error("{0}")]
  HfTokioApiError(#[from] hf_hub::api::tokio::ApiError),
  #[error("Invalid HuggingFace URI.")]
  HfInvalidUri,
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaCpp(#[from] llama_cpp_2::LlamaCppError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaModelLoad(#[from] llama_cpp_2::LlamaModelLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaContextLoad(#[from] llama_cpp_2::LlamaContextLoadError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaStringToToken(#[from] llama_cpp_2::StringToTokenError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaBatchAddError(#[from] llama_cpp_2::llama_batch::BatchAddError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaDecodeError(#[from] llama_cpp_2::DecodeError),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  LlamaTokenToString(#[from] llama_cpp_2::TokenToStringError),
  #[cfg(feature = "template")]
  #[error("{0}")]
  MinijinjaError(#[from] minijinja::Error),
  #[cfg(feature = "llama.cpp")]
  #[error("{0}")]
  CCUtilsServerError(#[from] ccutils::servers::ServerError),
  #[cfg(feature = "llama.cpp")]
  #[error("Invalid JSON Grammar")]
  InvalidJsonGrammar,
  #[cfg(feature = "simple-api")]
  #[error("HTTP Request error: {0}")]
  HttpError(String),
  #[cfg(feature = "simple-api")]
  #[error("Error in API usage: code: {code} message: {message} type: {error_type}")]
  SimpleApiError
  {
    code: u32,
    message: String,
    error_type: String,
  },
  #[cfg(any(feature = "candle", feature = "candle-git"))]
  #[error("Error in candle: {0}")]
  CandleError(#[from] candle_core::Error),
  #[error("Model was not properly defined.")]
  UndefinedModel,
  #[error("Unknown end-of-stream token '{0}'.")]
  UnknownEndOfStream(String),
  #[error("Other error: {0}")]
  Other(String),
  #[error("Unsupported file format")]
  UnsupportedFileFormat,
}

impl<T> From<PoisonError<T>> for Error
{
  fn from(value: PoisonError<T>) -> Self
  {
    Error::PoisonError(value.to_string())
  }
}

impl From<Box<dyn std::error::Error + Send + Sync>> for Error
{
  fn from(err: Box<dyn std::error::Error + Send + Sync>) -> Self
  {
    Error::Other(err.to_string())
  }
}
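
// Illustrative sketch, not part of the original file: a small test module
// showing how the conversions defined above let foreign errors flow into
// `Error`, e.g. via `.into()` or the `?` operator. The module and test names
// are assumptions made for this example only.
#[cfg(test)]
mod conversion_examples
{
  use super::Error;
  use std::sync::PoisonError;

  #[test]
  fn boxed_error_becomes_other()
  {
    // Any boxed error is flattened into `Error::Other` holding its message.
    let boxed: Box<dyn std::error::Error + Send + Sync> = "something failed".into();
    assert!(matches!(Error::from(boxed), Error::Other(_)));
  }

  #[test]
  fn poison_error_becomes_poison_error()
  {
    // A lock-poisoning error is stored as its string representation.
    let poisoned: PoisonError<()> = PoisonError::new(());
    assert!(matches!(Error::from(poisoned), Error::PoisonError(_)));
  }

  #[test]
  fn io_error_converts_via_from()
  {
    // `#[from] std::io::Error` on the `IOError` variant enables this conversion.
    let io = std::io::Error::new(std::io::ErrorKind::NotFound, "missing file");
    assert!(matches!(Error::from(io), Error::IOError(_)));
  }
}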