use llama_cpp_v3::LlamaError;
/// Unified error type for the agent layer.
///
/// Wraps lower-level failures (`LlamaError`, I/O, JSON) via `#[from]` so `?`
/// converts them automatically, and adds agent-specific variants for tool
/// execution, permissions, and the iteration budget. Display strings come from
/// the `#[error(...)]` attributes through the `thiserror` derive.
#[derive(Debug, thiserror::Error)]
pub enum AgentError {
/// Error propagated from the llama.cpp backend (auto-converted via `#[from]`).
#[error("Llama backend error: {0}")]
Llama(#[from] LlamaError),
/// A tool ran but reported a failure; `tool` is the tool's name and
/// `message` is the tool-provided failure description.
#[error("Tool error: {tool}: {message}")]
Tool { tool: String, message: String },
/// The model requested a tool name that is not registered.
#[error("Tool not found: {0}")]
ToolNotFound(String),
/// The model's output could not be parsed into a tool call; the payload
/// carries the parse failure detail (or the offending text — depends on caller).
#[error("Failed to parse tool call from model output: {0}")]
ToolCallParse(String),
/// The named tool exists but the caller is not allowed to invoke it.
#[error("Permission denied for tool: {0}")]
PermissionDenied(String),
/// The agent loop hit its configured iteration cap without finishing;
/// the payload is that cap.
#[error("Max iterations ({0}) exceeded")]
MaxIterations(usize),
/// Underlying I/O failure (auto-converted via `#[from]`).
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
/// JSON (de)serialization failure (auto-converted via `#[from]`).
#[error("JSON error: {0}")]
Json(#[from] serde_json::Error),
/// An operation requiring a loaded model was attempted with none loaded.
#[error("No model loaded")]
NoModel,
/// Catch-all for errors that don't fit a more specific variant.
#[error("Agent error: {0}")]
Other(String),
}