llm_observatory_sdk/lib.rs
// Copyright 2025 LLM Observatory Contributors
// SPDX-License-Identifier: Apache-2.0

//! LLM Observatory Rust SDK
//!
//! This SDK provides trait-based instrumentation for Large Language Model (LLM) applications,
//! enabling comprehensive observability through OpenTelemetry integration.
//!
//! # Features
//!
//! - Automatic tracing of LLM requests and responses
//! - Cost calculation based on token usage
//! - Support for streaming completions
//! - OpenTelemetry-based observability
//! - Provider-agnostic trait design
//! - Built-in support for OpenAI, Anthropic, and more
//!
//! # Quick Start
//!
//! ```rust,no_run
//! use llm_observatory_sdk::{LLMObservatory, InstrumentedLLM, OpenAIClient};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Initialize the observatory
//!     let observatory = LLMObservatory::builder()
//!         .with_service_name("my-app")
//!         .with_otlp_endpoint("http://localhost:4317")
//!         .build()?;
//!
//!     // Create an instrumented client
//!     let client = OpenAIClient::new("your-api-key")
//!         .with_observatory(observatory);
//!
//!     // Make an instrumented LLM call
//!     let response = client.chat_completion()
//!         .model("gpt-4")
//!         .message("user", "Hello, world!")
//!         .send()
//!         .await?;
//!
//!     println!("Response: {}", response.content);
//!     println!("Cost: ${:.6}", response.cost_usd);
//!
//!     Ok(())
//! }
//! ```
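//!
//! Streaming completions are supported as well. The sketch below is purely
//! hypothetical: the `stream()` method and the `delta` field are illustrative
//! assumptions, not confirmed API; see [`StreamChunk`] for the actual type.
//!
//! ```rust,ignore
//! use futures::StreamExt;
//!
//! let mut stream = client.chat_completion()
//!     .model("gpt-4")
//!     .message("user", "Stream this, please")
//!     .stream() // hypothetical method name
//!     .await?;
//!
//! // Consume incremental chunks as they arrive.
//! while let Some(chunk) = stream.next().await {
//!     let chunk: StreamChunk = chunk?;
//!     print!("{}", chunk.delta); // hypothetical field
//! }
//! ```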
//!
//! # Architecture
//!
//! The SDK is built around several core concepts:
//!
//! - [`LLMObservatory`]: Central observability manager that handles OpenTelemetry setup
//! - [`InstrumentedLLM`]: Trait for LLM clients with automatic instrumentation
//! - [`OpenAIClient`]: OpenAI-specific implementation with full API support
//! - Cost calculation: Automatic cost tracking based on provider pricing (see the sketch below)
//!
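//! To make the cost bullet concrete: per-call cost is derived from token usage
//! and per-token prices. The numbers below are purely illustrative, and the
//! variables are hypothetical stand-ins, not this crate's [`Pricing`] API:
//!
//! ```rust
//! // Hypothetical per-million-token rates in USD; real rates come from the
//! // provider's published pricing.
//! let (input_rate, output_rate) = (30.0_f64, 60.0_f64);
//! let (input_tokens, output_tokens) = (1_200_u32, 350_u32);
//!
//! // cost = tokens / 1_000_000 * rate, summed over input and output.
//! let cost_usd = f64::from(input_tokens) / 1e6 * input_rate
//!     + f64::from(output_tokens) / 1e6 * output_rate;
//! assert!((cost_usd - 0.057).abs() < 1e-12);
//! ```
//!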
//! # OpenTelemetry Integration
//!
//! All LLM operations are automatically traced using the OpenTelemetry semantic
//! conventions for GenAI operations, making them compatible with standard
//! observability tools such as Jaeger, Prometheus, and Grafana.
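//!
//! For instance, a chat-completion span typically carries GenAI semantic
//! convention attributes along these lines (the exact set depends on the
//! provider and the semantic-conventions version):
//!
//! ```text
//! gen_ai.operation.name      = "chat"
//! gen_ai.system              = "openai"
//! gen_ai.request.model       = "gpt-4"
//! gen_ai.usage.input_tokens  = 12
//! gen_ai.usage.output_tokens = 34
//! ```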

#![warn(missing_docs, rust_2018_idioms)]
#![deny(unsafe_code)]

pub mod cost;
pub mod error;
pub mod instrument;
pub mod observatory;
pub mod traits;

#[cfg(feature = "openai")]
pub mod openai;

// Re-export core types
pub use llm_observatory_core::{
    provider::Pricing,
    span::{ChatMessage, LlmInput, LlmOutput, LlmSpan, SpanStatus},
    types::{Cost, Latency, Metadata, Provider, TokenUsage},
    Error as CoreError, Result as CoreResult,
};

// Re-export SDK types
pub use error::{Error, Result};
pub use instrument::{InstrumentedSpan, SpanBuilder};
pub use observatory::{LLMObservatory, ObservatoryBuilder};
pub use traits::{ChatCompletionRequest, ChatCompletionResponse, InstrumentedLLM, StreamChunk};

#[cfg(feature = "openai")]
pub use openai::{OpenAIClient, OpenAIConfig};

// Re-export async_trait for convenience
pub use async_trait::async_trait;

/// The SDK version, taken from this crate's `Cargo.toml` at compile time.
pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Initialize the SDK with default settings.
///
/// This is a convenience function that creates an [`LLMObservatory`] instance
/// with sensible defaults for local development.
///
/// # Example
///
/// ```rust,no_run
/// use llm_observatory_sdk::init;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let observatory = init("my-service").await?;
///     // Use observatory with your LLM clients...
///     Ok(())
/// }
/// ```
pub async fn init(service_name: impl Into<String>) -> Result<LLMObservatory> {
    LLMObservatory::builder()
        .with_service_name(service_name)
        .build()
}

/// Initialize the SDK with a custom OTLP endpoint.
///
/// # Arguments
///
/// * `service_name` - Name of your service for tracing
/// * `otlp_endpoint` - OTLP gRPC endpoint (e.g., "http://localhost:4317")
///
/// # Example
///
/// ```rust,no_run
/// use llm_observatory_sdk::init_with_endpoint;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let observatory = init_with_endpoint(
///         "my-service",
///         "http://collector:4317"
///     ).await?;
///     Ok(())
/// }
/// ```
pub async fn init_with_endpoint(
    service_name: impl Into<String>,
    otlp_endpoint: impl Into<String>,
) -> Result<LLMObservatory> {
    LLMObservatory::builder()
        .with_service_name(service_name)
        .with_otlp_endpoint(otlp_endpoint)
        .build()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_version() {
        assert!(!VERSION.is_empty());
    }
}