// neuron_otel/lib.rs
//! OpenTelemetry instrumentation for neuron using GenAI semantic conventions.
//!
//! Implements [`ObservabilityHook`] with the [OTel GenAI semantic conventions][spec]
//! (`gen_ai.*` namespace). Emits [`tracing`] spans — users bring their own
//! `tracing-opentelemetry` subscriber for OTel export.
//!
//! # Usage
//!
//! ```no_run
//! use neuron_otel::{OtelHook, OtelConfig};
//!
//! let hook = OtelHook::new(OtelConfig {
//!     capture_input: false,
//!     capture_output: false,
//! });
//! // Pass to AgentLoop::builder(...).hook(hook).build()
//! ```
//!
//! # Span hierarchy
//!
//! | Span name | OTel convention | When |
//! |-----------|-----------------|------|
//! | `gen_ai.loop.iteration` | — | Each loop turn |
//! | `gen_ai.chat` | `gen_ai.chat` | LLM request/response |
//! | `gen_ai.execute_tool` | `gen_ai.execute_tool` | Tool execution |
//! | `gen_ai.context.compaction` | — | Context compaction |
//!
//! # Opt-in content capture
//!
//! By default, request/response content is NOT captured (privacy).
//! Set `capture_input` / `capture_output` to `true` to include message
//! bodies in span attributes.
//!
//! [spec]: https://opentelemetry.io/docs/specs/semconv/gen-ai/

use neuron_types::{HookAction, HookError, HookEvent, ObservabilityHook};
37
/// Configuration for the OTel hook.
///
/// Both capture flags default to `false` via `#[derive(Default)]`, so the
/// privacy-preserving behavior (no message content in spans) is the default.
#[derive(Debug, Clone, Default)]
pub struct OtelConfig {
    /// Whether to capture input message content in span attributes.
    /// Disabled by default for privacy.
    pub capture_input: bool,
    /// Whether to capture output message content in span attributes.
    /// Disabled by default for privacy.
    pub capture_output: bool,
}
48
49/// An [`ObservabilityHook`] that emits [`tracing`] spans following the
50/// OTel GenAI semantic conventions.
51///
52/// Always returns [`HookAction::Continue`] — observes but never controls.
53///
54/// # Attributes emitted
55///
56/// | Attribute | Value |
57/// |-----------|-------|
58/// | `gen_ai.system` | `"neuron"` |
59/// | `gen_ai.request.model` | Model from request |
60/// | `gen_ai.usage.input_tokens` | Input token count |
61/// | `gen_ai.usage.output_tokens` | Output token count |
62/// | `gen_ai.response.stop_reason` | Stop reason |
63/// | `gen_ai.tool.name` | Tool name |
64/// | `gen_ai.tool.is_error` | Whether tool returned an error |
65pub struct OtelHook {
66 config: OtelConfig,
67}
68
69impl OtelHook {
70 /// Create a new OTel hook with the given configuration.
71 #[must_use]
72 pub fn new(config: OtelConfig) -> Self {
73 Self { config }
74 }
75}
76
77impl Default for OtelHook {
78 fn default() -> Self {
79 Self::new(OtelConfig::default())
80 }
81}
82
83impl ObservabilityHook for OtelHook {
84 fn on_event(
85 &self,
86 event: HookEvent<'_>,
87 ) -> impl std::future::Future<Output = Result<HookAction, HookError>> + Send {
88 match &event {
89 HookEvent::LoopIteration { turn } => {
90 tracing::info_span!("gen_ai.loop.iteration", gen_ai.system = "neuron", turn)
91 .in_scope(|| {
92 tracing::debug!("loop iteration {turn}");
93 });
94 }
95 HookEvent::PreLlmCall { request } => {
96 let span = tracing::info_span!(
97 "gen_ai.chat",
98 gen_ai.system = "neuron",
99 gen_ai.request.model = %request.model,
100 gen_ai.request.messages = request.messages.len(),
101 gen_ai.request.tools = request.tools.len(),
102 );
103 span.in_scope(|| {
104 if self.config.capture_input {
105 tracing::debug!(
106 messages = ?request.messages.len(),
107 "gen_ai.chat request"
108 );
109 } else {
110 tracing::debug!("gen_ai.chat request");
111 }
112 });
113 }
114 HookEvent::PostLlmCall { response } => {
115 let span = tracing::info_span!(
116 "gen_ai.chat",
117 gen_ai.system = "neuron",
118 gen_ai.response.model = %response.model,
119 gen_ai.response.stop_reason = ?response.stop_reason,
120 gen_ai.usage.input_tokens = response.usage.input_tokens,
121 gen_ai.usage.output_tokens = response.usage.output_tokens,
122 );
123 span.in_scope(|| {
124 if self.config.capture_output {
125 tracing::debug!(
126 content_blocks = response.message.content.len(),
127 "gen_ai.chat response"
128 );
129 } else {
130 tracing::debug!("gen_ai.chat response");
131 }
132 });
133 }
134 HookEvent::PreToolExecution { tool_name, .. } => {
135 tracing::info_span!(
136 "gen_ai.execute_tool",
137 gen_ai.system = "neuron",
138 gen_ai.tool.name = %tool_name,
139 )
140 .in_scope(|| {
141 tracing::debug!("tool execution start");
142 });
143 }
144 HookEvent::PostToolExecution { tool_name, output } => {
145 tracing::info_span!(
146 "gen_ai.execute_tool",
147 gen_ai.system = "neuron",
148 gen_ai.tool.name = %tool_name,
149 gen_ai.tool.is_error = output.is_error,
150 )
151 .in_scope(|| {
152 tracing::debug!("tool execution complete");
153 });
154 }
155 HookEvent::ContextCompaction {
156 old_tokens,
157 new_tokens,
158 } => {
159 tracing::info_span!(
160 "gen_ai.context.compaction",
161 gen_ai.system = "neuron",
162 old_tokens,
163 new_tokens,
164 reduced_by = old_tokens - new_tokens,
165 )
166 .in_scope(|| {
167 tracing::info!("context compacted");
168 });
169 }
170 HookEvent::SessionStart { session_id } => {
171 tracing::info!(gen_ai.system = "neuron", session_id, "gen_ai.session.start");
172 }
173 HookEvent::SessionEnd { session_id } => {
174 tracing::info!(gen_ai.system = "neuron", session_id, "gen_ai.session.end");
175 }
176 }
177 std::future::ready(Ok(HookAction::Continue))
178 }
179}