// cortexai_llm_client/lib.rs
//! # LLM Client - Shared Logic
//!
//! Runtime-agnostic LLM client logic for building requests and parsing responses.
//! This crate has NO runtime dependencies (no async, no HTTP client).
//!
//! ## Supported Providers
//!
//! - OpenAI (GPT-4, GPT-3.5, etc.)
//! - Anthropic (Claude 3, etc.)
//! - OpenRouter (100+ models)
//!
//! ## Usage
//!
//! ```rust
//! use cortexai_llm_client::{
//!     Provider, Message, RequestBuilder, ResponseParser,
//! };
//!
//! // Build a request
//! let messages = vec![
//!     Message::system("You are a helpful assistant."),
//!     Message::user("Hello!"),
//! ];
//!
//! let request = RequestBuilder::new(Provider::OpenAI)
//!     .model("gpt-4o-mini")
//!     .messages(&messages)
//!     .api_key("sk-...")
//!     .temperature(0.7)
//!     .max_tokens(1024)
//!     .stream(false)
//!     .build()
//!     .unwrap();
//!
//! // Use your runtime's HTTP client to send request.url, request.headers, request.body
//! // Then parse the response:
//!
//! let response_json = r#"{"choices":[{"message":{"content":"Hello!"}}]}"#;
//! let response = ResponseParser::parse(Provider::OpenAI, response_json).unwrap();
//! println!("{}", response.content);
//! ```

// Internal modules; everything callers need is re-exported below so the
// module layout stays a private implementation detail.
mod error;
mod message;
mod provider;
mod request;
mod response;

// Public API surface, grouped by concern:
// - errors: crate error enum and its `Result` alias
// - messages: chat message type and its role enum
// - provider: which LLM backend to target
// - request: runtime-agnostic HTTP request description and its builder
// - response: parsed completions, streaming chunks, tool calls, and token usage
pub use error::{LlmClientError, Result};
pub use message::{Message, Role};
pub use provider::Provider;
pub use request::{HttpRequest, RequestBuilder};
pub use response::{LlmResponse, ResponseParser, StreamChunk, ToolCall, ToolCallChunk, Usage};