// rig/telemetry/mod.rs
//! Telemetry orchestration across a pipeline or workflow. This covers tracing,
//! exporting traces to an OpenTelemetry collector, and configuring agents with
//! the correct tracing style so they emit the right traces for platforms such
//! as Langfuse.
5
6use crate::completion::GetTokenUsage;
7use serde::Serialize;
8
9pub trait ProviderRequestExt {
10    type InputMessage: Serialize;
11
12    fn get_input_messages(&self) -> Vec<Self::InputMessage>;
13    fn get_system_prompt(&self) -> Option<String>;
14    fn get_model_name(&self) -> String;
15    fn get_prompt(&self) -> Option<String>;
16}
17
18pub trait ProviderResponseExt {
19    type OutputMessage: Serialize;
20    type Usage: Serialize;
21
22    fn get_response_id(&self) -> Option<String>;
23
24    fn get_response_model_name(&self) -> Option<String>;
25
26    fn get_output_messages(&self) -> Vec<Self::OutputMessage>;
27
28    fn get_text_response(&self) -> Option<String>;
29
30    fn get_usage(&self) -> Option<Self::Usage>;
31}
32
33/// A trait designed specifically to be used with Spans for the purpose of recording telemetry.
34/// Nearly all methods
35pub trait SpanCombinator {
36    fn record_token_usage<U>(&self, usage: &U)
37    where
38        U: GetTokenUsage;
39
40    fn record_response_metadata<R>(&self, response: &R)
41    where
42        R: ProviderResponseExt;
43
44    fn record_model_input<T>(&self, messages: &T)
45    where
46        T: Serialize;
47
48    fn record_model_output<T>(&self, messages: &T)
49    where
50        T: Serialize;
51}
52
53impl SpanCombinator for tracing::Span {
54    fn record_token_usage<U>(&self, usage: &U)
55    where
56        U: GetTokenUsage,
57    {
58        if self.is_disabled() {
59            return;
60        }
61
62        if let Some(usage) = usage.token_usage() {
63            self.record("gen_ai.usage.input_tokens", usage.input_tokens);
64            self.record("gen_ai.usage.output_tokens", usage.output_tokens);
65            self.record("gen_ai.usage.cached_tokens", usage.cached_input_tokens);
66        }
67    }
68
69    fn record_response_metadata<R>(&self, response: &R)
70    where
71        R: ProviderResponseExt,
72    {
73        if self.is_disabled() {
74            return;
75        }
76
77        if let Some(id) = response.get_response_id() {
78            self.record("gen_ai.response.id", id);
79        }
80
81        if let Some(model_name) = response.get_response_model_name() {
82            self.record("gen_ai.response.model_name", model_name);
83        }
84    }
85
86    fn record_model_input<T>(&self, input: &T)
87    where
88        T: Serialize,
89    {
90        if self.is_disabled() {
91            return;
92        }
93
94        let input_as_json_string =
95            serde_json::to_string(input).expect("Serializing a Rust type to JSON should not break");
96
97        self.record("gen_ai.input.messages", input_as_json_string);
98    }
99
100    fn record_model_output<T>(&self, output: &T)
101    where
102        T: Serialize,
103    {
104        if self.is_disabled() {
105            return;
106        }
107
108        let output_as_json_string = serde_json::to_string(output)
109            .expect("Serializing a Rust type to JSON should not break");
110
111        self.record("gen_ai.output.messages", output_as_json_string);
112    }
113}