//! Error types for the Llama Core library.

use thiserror::Error;

/// The error type for Llama Core operations.
#[derive(Error, Debug)]
pub enum LlamaCoreError {
    /// Errors in general operation.
    #[error("{0}")]
    Operation(String),
    /// Errors in context initialization.
    #[error("Failed to initialize computation context. Reason: {0}")]
    InitContext(String),
    /// Errors thrown by the wasi-nn-ggml plugin and runtime.
    #[error("{0}")]
    Backend(#[from] BackendError),
    /// Errors thrown by the search backend.
    #[cfg(feature = "search")]
    #[cfg_attr(docsrs, doc(cfg(feature = "search")))]
    #[error("{0}")]
    Search(String),
    /// Error raised when a file is not found.
    #[error("File not found.")]
    FileNotFound,
    /// Errors from Qdrant.
    #[cfg(feature = "rag")]
    #[cfg_attr(docsrs, doc(cfg(feature = "rag")))]
    #[error("Qdrant error:{0}")]
    Qdrant(String),
}
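
// Illustration (not part of the upstream source): the `#[from]` attribute on
// the `Backend` variant derives `From<BackendError> for LlamaCoreError`, so
// the `?` operator propagates backend failures with an automatic conversion.
// `demo_propagation` is a hypothetical helper added only to show this.
#[allow(dead_code)]
fn demo_propagation() -> Result<(), LlamaCoreError> {
    let backend: Result<(), BackendError> =
        Err(BackendError::Compute("out of memory".to_string()));
    backend?; // becomes LlamaCoreError::Backend via the derived From impl
    Ok(())
}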

/// Error types for the wasi-nn backend.
#[derive(Error, Debug)]
pub enum BackendError {
    /// Errors in setting the input tensor.
    #[error("{0}")]
    SetInput(String),
    /// Errors in model inference.
    #[error("{0}")]
    Compute(String),
    /// Errors in model inference in stream mode.
    #[error("{0}")]
    ComputeSingle(String),
    /// Errors in getting the output tensor.
    #[error("{0}")]
    GetOutput(String),
    /// Errors in getting the output tensor in stream mode.
    #[error("{0}")]
    GetOutputSingle(String),
    /// Errors in cleaning up the computation context in stream mode.
    #[error("{0}")]
    FinishSingle(String),
}
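
// A minimal test sketch (not part of the upstream source) verifying the
// `Display` messages that `thiserror` derives from the `#[error(...)]`
// attributes above.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn init_context_formats_reason() {
        let err = LlamaCoreError::InitContext("model not loaded".to_string());
        assert_eq!(
            err.to_string(),
            "Failed to initialize computation context. Reason: model not loaded"
        );
    }

    #[test]
    fn backend_variant_forwards_inner_message() {
        // Both `Backend` and `GetOutput` use `#[error("{0}")]`, so the inner
        // message passes through unchanged.
        let err =
            LlamaCoreError::from(BackendError::GetOutput("no output tensor".to_string()));
        assert_eq!(err.to_string(), "no output tensor");
    }
}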