autoagents_llm/lib.rs
//! AutoAgents LLM is a unified interface for interacting with Large Language Model providers.
//!
//! # Overview
//! This crate provides a consistent API for working with different LLM backends by abstracting away
//! provider-specific implementation details. It supports:
//!
//! - Chat-based interactions
//! - Text completion
//! - Embeddings generation
//! - Multiple providers (OpenAI, Anthropic, etc.)
//! - Request validation and retry logic
//!
//! # Architecture
//! The crate is organized into modules that handle different aspects of LLM interactions:
//!
//! - [`backends`]: implementations for the supported providers
//! - [`builder`]: configuration and instantiation of providers
//! - [`chat`]: chat-based interactions
//! - [`completion`]: text completion
//! - [`embedding`]: vector embeddings generation
//! - [`error`]: error types and handling
//! - [`evaluator`]: evaluation of provider outputs
//! - [`secret_store`]: storage for API keys and other secrets
//! - [`models`]: listing available models
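//!
//! # Example
//! A hypothetical sketch of the intended flow. The builder type and method
//! names below are illustrative, not the crate's confirmed API; see the
//! [`builder`] and [`chat`] modules for the real entry points.
//! ```ignore
//! use autoagents_llm::builder::LLMBuilder; // name assumed for illustration
//!
//! let llm = LLMBuilder::new()
//!     // ...provider-specific configuration...
//!     .build()?;
//! ```
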
// Re-export for convenience
pub use async_trait::async_trait;

use chat::Tool;
use serde::{Deserialize, Serialize};

/// Backend implementations for supported LLM providers like OpenAI, Anthropic, etc.
pub mod backends;

/// Builder pattern for configuring and instantiating LLM providers
pub mod builder;

/// Chat-based interactions with language models (e.g. ChatGPT style)
pub mod chat;

/// Text completion capabilities (e.g. GPT-3 style completion)
pub mod completion;

/// Vector embeddings generation for text
pub mod embedding;

/// Error types and handling
pub mod error;

/// Evaluator for LLM providers
pub mod evaluator;

/// Secret store for storing API keys and other sensitive information
pub mod secret_store;

/// Support for listing the models available from a provider
pub mod models;

mod tool;
/// Tool abstractions used when exposing callable functions to an LLM.
pub use tool::{ToolInputT, ToolT};

#[inline]
/// Initialize logging using env_logger if the "logging" feature is enabled.
/// This is a no-op if the feature is not enabled.
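///
/// A minimal usage sketch (the crate name `autoagents_llm` is assumed from
/// this file's path; marked `ignore` because behavior depends on the
/// optional "logging" feature):
/// ```ignore
/// autoagents_llm::init_logging();
/// ```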
pub fn init_logging() {
    #[cfg(feature = "logging")]
    {
        let _ = env_logger::try_init();
    }
}

/// Core trait that all LLM providers must implement, combining chat, completion,
/// embedding, and model-listing capabilities into a unified interface.
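///
/// A hypothetical sketch of an implementation. `MyBackend` and
/// `registered_tools` are illustrative names; a real implementor must also
/// implement the four supertraits.
/// ```ignore
/// impl LLMProvider for MyBackend {
///     fn tools(&self) -> Option<&[Tool]> {
///         Some(&self.registered_tools)
///     }
/// }
/// ```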
pub trait LLMProvider:
    chat::ChatProvider
    + completion::CompletionProvider
    + embedding::EmbeddingProvider
    + models::ModelsProvider
    + Send
    + Sync
    + 'static
{
    /// Returns the tools available to this provider, if any.
    /// The default implementation returns `None`.
    fn tools(&self) -> Option<&[Tool]> {
        None
    }
}

/// Tool call represents a function call that an LLM wants to make.
/// This is a standardized structure used across all providers.
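///
/// # Example
/// Constructing a call by hand (field values are illustrative; the crate
/// name `autoagents_llm` is assumed from this file's path):
/// ```
/// use autoagents_llm::{FunctionCall, ToolCall};
///
/// let call = ToolCall {
///     id: "call_123".to_string(),
///     call_type: "function".to_string(),
///     function: FunctionCall {
///         name: "get_weather".to_string(),
///         arguments: r#"{"city":"Paris"}"#.to_string(),
///     },
/// };
/// assert_eq!(call.function.name, "get_weather");
/// ```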
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
pub struct ToolCall {
    /// The ID of the tool call.
    pub id: String,
    /// The type of the tool call (usually "function").
    #[serde(rename = "type")]
    pub call_type: String,
    /// The function to call.
    pub function: FunctionCall,
}

/// FunctionCall contains details about which function to call and with what arguments.
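///
/// # Example
/// Decoding the JSON-encoded arguments; a sketch assuming `serde_json` is
/// available as a (dev-)dependency, with illustrative names and values:
/// ```ignore
/// use autoagents_llm::FunctionCall;
///
/// let f = FunctionCall {
///     name: "add".to_string(),
///     arguments: r#"{"a": 1, "b": 2}"#.to_string(),
/// };
/// let args: serde_json::Value = serde_json::from_str(&f.arguments).unwrap();
/// assert_eq!(args["a"], 1);
/// ```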
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
pub struct FunctionCall {
    /// The name of the function to call.
    pub name: String,
    /// The arguments to pass to the function, typically serialized as a JSON string.
    pub arguments: String,
}
98}