aidale_layer/logging.rs

//! Logging layer for provider operations.
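//!
//! A minimal usage sketch (names are illustrative: `my_provider` stands in for any
//! concrete [`Provider`] and `request` for a prepared `ChatCompletionRequest`):
//!
//! ```ignore
//! use aidale_core::layer::Layer;
//!
//! // Wrap an existing provider so every call is logged via `tracing`.
//! let logged = LoggingLayer::with_prefix("[my-app]").layer(my_provider);
//! let response = logged.chat_completion(request).await?;
//! ```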

use aidale_core::error::AiError;
use aidale_core::layer::{Layer, LayeredProvider};
use aidale_core::provider::{ChatCompletionStream, Provider};
use aidale_core::types::*;
use async_trait::async_trait;
use std::fmt::Debug;
use std::sync::Arc;

/// Logging layer that logs provider operations.
#[derive(Debug, Clone)]
pub struct LoggingLayer {
    prefix: String,
}

impl LoggingLayer {
    /// Create a new logging layer
    pub fn new() -> Self {
        Self {
            prefix: "[AI Core]".to_string(),
        }
    }

    /// Create a logging layer with custom prefix
    pub fn with_prefix(prefix: impl Into<String>) -> Self {
        Self {
            prefix: prefix.into(),
        }
    }
}

impl Default for LoggingLayer {
    fn default() -> Self {
        Self::new()
    }
}

impl<P: Provider> Layer<P> for LoggingLayer {
    type LayeredProvider = LoggingProvider<P>;

    fn layer(&self, inner: P) -> Self::LayeredProvider {
        LoggingProvider {
            inner,
            prefix: self.prefix.clone(),
        }
    }
}

/// Provider wrapped with logging
#[derive(Debug)]
pub struct LoggingProvider<P> {
    inner: P,
    prefix: String,
}

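// Core layering logic: each call logs the incoming request, times the inner
// provider call, and then logs either the successful result or the error.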
#[async_trait]
impl<P: Provider> LayeredProvider for LoggingProvider<P> {
    type Inner = P;

    fn inner(&self) -> &Self::Inner {
        &self.inner
    }

    async fn layered_chat_completion(
        &self,
        req: ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse, AiError> {
        tracing::debug!(
            "{} chat_completion request: model={}, messages={}",
            self.prefix,
            req.model,
            req.messages.len()
        );

        let start = std::time::Instant::now();
        let result = self.inner.chat_completion(req).await;
        let elapsed = start.elapsed();

        match &result {
            Ok(response) => {
                tracing::debug!(
                    "{} chat_completion success: id={}, tokens={}, elapsed={:?}",
                    self.prefix,
                    response.id,
                    response.usage.total_tokens,
                    elapsed
                );
            }
            Err(e) => {
                tracing::error!(
                    "{} chat_completion error: {:?}, elapsed={:?}",
                    self.prefix,
                    e,
                    elapsed
                );
            }
        }

        result
    }

    async fn layered_stream_chat_completion(
        &self,
        req: ChatCompletionRequest,
    ) -> Result<Box<ChatCompletionStream>, AiError> {
        tracing::debug!(
            "{} stream_chat_completion request: model={}, messages={}",
            self.prefix,
            req.model,
            req.messages.len()
        );

        let start = std::time::Instant::now();
        let result = self.inner.stream_chat_completion(req).await;
        let elapsed = start.elapsed();

        match &result {
            Ok(_) => {
                tracing::debug!(
                    "{} stream_chat_completion success, elapsed={:?}",
                    self.prefix,
                    elapsed
                );
            }
            Err(e) => {
                tracing::error!(
                    "{} stream_chat_completion error: {:?}, elapsed={:?}",
                    self.prefix,
                    e,
                    elapsed
                );
            }
        }

        result
    }
}

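// Expose the layered implementations through the plain `Provider` trait so a
// `LoggingProvider<P>` can be used anywhere the inner provider could be.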
#[async_trait]
impl<P: Provider> Provider for LoggingProvider<P> {
    fn info(&self) -> Arc<ProviderInfo> {
        LayeredProvider::layered_info(self)
    }

    async fn chat_completion(
        &self,
        req: ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse, AiError> {
        LayeredProvider::layered_chat_completion(self, req).await
    }

    async fn stream_chat_completion(
        &self,
        req: ChatCompletionRequest,
    ) -> Result<Box<ChatCompletionStream>, AiError> {
        LayeredProvider::layered_stream_chat_completion(self, req).await
    }
}