// daimon_core/lib.rs
//! # daimon-core
//!
//! Core traits and types for the [Daimon](https://docs.rs/daimon) AI agent
//! framework. This crate is the **plugin interface** — implement [`Model`] to
//! add a new LLM provider.
//!
//! Provider crates depend on `daimon-core` for the shared types and trait.
//! The main `daimon` crate re-exports everything from here, so end users
//! typically never need to depend on `daimon-core` directly.
//!
//! ## Implementing a Provider
//!
//! ```ignore
//! use daimon_core::{Model, ChatRequest, ChatResponse, Result, ResponseStream};
//!
//! pub struct MyProvider { /* ... */ }
//!
//! impl Model for MyProvider {
//!     async fn generate(&self, request: &ChatRequest) -> Result<ChatResponse> {
//!         // call your LLM API
//!         todo!()
//!     }
//!
//!     async fn generate_stream(&self, request: &ChatRequest) -> Result<ResponseStream> {
//!         todo!()
//!     }
//! }
//! ```

30pub mod distributed;
31mod embedding;
32mod error;
33mod model;
34mod stream;
35mod tool_types;
36mod types;
37
38pub use distributed::{AgentTask, ErasedTaskBroker, TaskBroker, TaskResult, TaskStatus};
39pub use embedding::{EmbeddingModel, ErasedEmbeddingModel, SharedEmbeddingModel};
40pub use error::{DaimonError, Result};
41pub use model::{ErasedModel, Model, SharedModel};
42pub use stream::{ResponseStream, StreamEvent};
43pub use tool_types::ToolCall;
44pub use types::{ChatRequest, ChatResponse, Message, Role, StopReason, ToolSpec, Usage};