use serde::{Deserialize, Serialize};
use crate::types::ids::GenerationId;
use crate::types::status::{CancellationStatus, StreamingStatus};
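
/// Metadata and usage statistics for a single generation as returned by the
/// generation-details endpoint. Latency and timing fields are assumed to be
/// in milliseconds (see `latency_seconds`); cost fields are assumed to be in
/// account currency units.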
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerationData {
pub id: GenerationId,
pub upstream_id: Option<String>,
pub total_cost: f64,
pub cache_discount: Option<f64>,
pub upstream_inference_cost: Option<f64>,
pub created_at: String,
pub model: String,
pub app_id: Option<i64>,
pub streamed: StreamingStatus,
pub cancelled: CancellationStatus,
pub provider_name: Option<String>,
pub latency: Option<i64>,
pub moderation_latency: Option<i64>,
pub generation_time: Option<i64>,
pub finish_reason: Option<String>,
pub native_finish_reason: Option<String>,
pub tokens_prompt: Option<i64>,
pub tokens_completion: Option<i64>,
pub native_tokens_prompt: Option<i64>,
pub native_tokens_completion: Option<i64>,
pub native_tokens_reasoning: Option<i64>,
pub num_media_prompt: Option<i64>,
pub num_media_completion: Option<i64>,
pub num_search_results: Option<i64>,
pub origin: String,
pub usage: f64,
pub is_byok: bool,
}
impl GenerationData {
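    /// Total normalized token count (prompt + completion), or `None` when
    /// neither count was reported.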
pub fn total_tokens(&self) -> Option<i64> {
match (self.tokens_prompt, self.tokens_completion) {
(Some(prompt), Some(completion)) => Some(prompt + completion),
(Some(prompt), None) => Some(prompt),
(None, Some(completion)) => Some(completion),
(None, None) => None,
}
}
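
    /// Total native (provider-reported) token count across prompt,
    /// completion, and reasoning. Missing counts are treated as zero; an
    /// all-zero total is reported as `None`.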
pub fn total_native_tokens(&self) -> Option<i64> {
let prompt = self.native_tokens_prompt.unwrap_or(0);
let completion = self.native_tokens_completion.unwrap_or(0);
let reasoning = self.native_tokens_reasoning.unwrap_or(0);
if prompt == 0 && completion == 0 && reasoning == 0 {
None
} else {
Some(prompt + completion + reasoning)
}
}
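
    /// Whether the generation completed without being cancelled. Note this
    /// only reflects cancellation; consult `finish_reason` for the
    /// provider-reported outcome.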
pub fn is_successful(&self) -> bool {
!self.cancelled.is_cancelled()
}
pub fn was_streamed(&self) -> bool {
self.streamed.is_active()
}
pub fn was_cancelled(&self) -> bool {
self.cancelled.is_cancelled()
}
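
    /// Cost after applying any cache discount: `total_cost - cache_discount`.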
pub fn effective_cost(&self) -> f64 {
self.total_cost - self.cache_discount.unwrap_or(0.0)
}
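
    /// Average cost per normalized token. Returns `None` when token counts
    /// are unreported or the total is zero, avoiding division by zero.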
    pub fn cost_per_token(&self) -> Option<f64> {
        self.total_tokens()
            .filter(|&tokens| tokens > 0)
            .map(|tokens| self.total_cost / tokens as f64)
    }
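
    /// Latency in seconds, assuming the raw `latency` field is milliseconds.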
pub fn latency_seconds(&self) -> Option<f64> {
self.latency.map(|ms| ms as f64 / 1000.0)
}
pub fn generation_time_seconds(&self) -> Option<f64> {
self.generation_time.map(|ms| ms as f64 / 1000.0)
}
pub fn used_web_search(&self) -> bool {
self.num_search_results.unwrap_or(0) > 0
}
pub fn included_media(&self) -> bool {
(self.num_media_prompt.unwrap_or(0) + self.num_media_completion.unwrap_or(0)) > 0
}
pub fn used_reasoning(&self) -> bool {
self.native_tokens_reasoning.unwrap_or(0) > 0
}
}
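
/// Wrapper for the `{ "data": { ... } }` envelope returned by the API;
/// convenience methods delegate to the inner [`GenerationData`].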
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct GenerationResponse {
pub data: GenerationData,
}
impl GenerationResponse {
pub fn generation(&self) -> &GenerationData {
&self.data
}
pub fn id(&self) -> &str {
self.data.id.as_str()
}
pub fn model(&self) -> &str {
&self.data.model
}
pub fn total_cost(&self) -> f64 {
self.data.total_cost
}
pub fn effective_cost(&self) -> f64 {
self.data.effective_cost()
}
pub fn is_successful(&self) -> bool {
self.data.is_successful()
}
pub fn was_streamed(&self) -> bool {
self.data.was_streamed()
}
pub fn total_tokens(&self) -> Option<i64> {
self.data.total_tokens()
}
pub fn cost_per_token(&self) -> Option<f64> {
self.data.cost_per_token()
}
pub fn latency_seconds(&self) -> Option<f64> {
self.data.latency_seconds()
}
pub fn used_web_search(&self) -> bool {
self.data.used_web_search()
}
pub fn included_media(&self) -> bool {
self.data.included_media()
}
pub fn used_reasoning(&self) -> bool {
self.data.used_reasoning()
}
}
#[cfg(test)]
mod tests {
use super::*;
fn create_test_generation_data() -> GenerationData {
GenerationData {
id: GenerationId::new("gen-123456"),
upstream_id: Some("upstream-789".to_string()),
total_cost: 0.025,
cache_discount: Some(0.005),
upstream_inference_cost: Some(0.020),
created_at: "2024-01-15T10:30:00Z".to_string(),
model: "openai/gpt-4".to_string(),
app_id: Some(12345),
streamed: StreamingStatus::Complete,
cancelled: CancellationStatus::NotCancelled,
provider_name: Some("OpenAI".to_string()),
latency: Some(1500),
moderation_latency: Some(100),
generation_time: Some(1200),
finish_reason: Some("stop".to_string()),
native_finish_reason: Some("stop".to_string()),
tokens_prompt: Some(50),
tokens_completion: Some(100),
native_tokens_prompt: Some(50),
native_tokens_completion: Some(100),
native_tokens_reasoning: Some(25),
num_media_prompt: Some(2),
num_media_completion: Some(0),
num_search_results: Some(5),
origin: "api".to_string(),
usage: 0.025,
is_byok: false,
}
}
#[test]
fn test_generation_data_total_tokens() {
let data = create_test_generation_data();
        assert_eq!(data.total_tokens(), Some(150));
    }
#[test]
fn test_generation_data_total_native_tokens() {
let data = create_test_generation_data();
        assert_eq!(data.total_native_tokens(), Some(175));
    }
#[test]
fn test_generation_data_success_checks() {
let data = create_test_generation_data();
assert!(data.is_successful());
assert!(data.was_streamed());
assert!(!data.was_cancelled());
}
#[test]
fn test_generation_data_cost_calculations() {
let data = create_test_generation_data();
        assert!((data.effective_cost() - 0.020).abs() < 1e-12);
        assert_eq!(data.cost_per_token(), Some(0.025 / 150.0));
}
#[test]
fn test_generation_data_time_conversions() {
let data = create_test_generation_data();
assert_eq!(data.latency_seconds(), Some(1.5));
assert_eq!(data.generation_time_seconds(), Some(1.2));
}
#[test]
fn test_generation_data_feature_checks() {
let data = create_test_generation_data();
assert!(data.used_web_search());
assert!(data.included_media());
assert!(data.used_reasoning());
}
#[test]
fn test_generation_response_convenience_methods() {
let data = create_test_generation_data();
let response = GenerationResponse { data };
assert_eq!(response.id(), "gen-123456");
assert_eq!(response.model(), "openai/gpt-4");
assert_eq!(response.total_cost(), 0.025);
        assert!((response.effective_cost() - 0.020).abs() < 1e-12);
assert!(response.is_successful());
assert!(response.was_streamed());
assert_eq!(response.total_tokens(), Some(150));
assert!(response.used_web_search());
assert!(response.included_media());
assert!(response.used_reasoning());
}
#[test]
fn test_generation_data_edge_cases() {
let minimal_data = GenerationData {
id: GenerationId::new("gen-minimal"),
upstream_id: None,
total_cost: 0.01,
cache_discount: None,
upstream_inference_cost: None,
created_at: "2024-01-15T10:30:00Z".to_string(),
model: "openai/gpt-3.5-turbo".to_string(),
app_id: None,
streamed: StreamingStatus::default(),
cancelled: CancellationStatus::default(),
provider_name: None,
latency: None,
moderation_latency: None,
generation_time: None,
finish_reason: None,
native_finish_reason: None,
tokens_prompt: None,
tokens_completion: None,
native_tokens_prompt: None,
native_tokens_completion: None,
native_tokens_reasoning: None,
num_media_prompt: None,
num_media_completion: None,
num_search_results: None,
origin: "api".to_string(),
usage: 0.01,
is_byok: false,
};
assert_eq!(minimal_data.total_tokens(), None);
assert_eq!(minimal_data.total_native_tokens(), None);
assert!(minimal_data.is_successful());
assert!(!minimal_data.was_streamed());
assert!(!minimal_data.was_cancelled());
assert_eq!(minimal_data.effective_cost(), 0.01);
assert_eq!(minimal_data.cost_per_token(), None);
assert!(!minimal_data.used_web_search());
assert!(!minimal_data.included_media());
assert!(!minimal_data.used_reasoning());
}
#[test]
fn test_generation_serialization() {
let data = create_test_generation_data();
let json = serde_json::to_string(&data).unwrap();
let parsed: GenerationData = serde_json::from_str(&json).unwrap();
assert_eq!(data, parsed);
}
#[test]
fn test_generation_response_serialization() {
let data = create_test_generation_data();
let response = GenerationResponse { data };
let json = serde_json::to_string(&response).unwrap();
let parsed: GenerationResponse = serde_json::from_str(&json).unwrap();
assert_eq!(response, parsed);
}
    #[test]
    fn test_generation_id_serialization() {
        // Reuse the shared fixture rather than duplicating the full struct;
        // only the id matters for this test.
        let mut generation = create_test_generation_data();
        generation.id = GenerationId::new("gen-12345");
        let json = serde_json::to_string(&generation).unwrap();
        assert!(json.contains("\"gen-12345\""));
        let deserialized: GenerationData = serde_json::from_str(&json).unwrap();
        assert_eq!(deserialized.id.as_str(), "gen-12345");
    }
#[test]
fn test_generation_id_from_string() {
let id: GenerationId = "string-id".into();
assert_eq!(id.as_str(), "string-id");
let id2: GenerationId = String::from("string-id-2").into();
assert_eq!(id2.as_str(), "string-id-2");
}
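
    // Sketch: exercises the zero-token guard in `cost_per_token`; the
    // zeroed fields below are hypothetical values, not real API output.
    #[test]
    fn test_cost_per_token_zero_tokens() {
        let mut data = create_test_generation_data();
        data.tokens_prompt = Some(0);
        data.tokens_completion = Some(0);
        assert_eq!(data.cost_per_token(), None);
    }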
#[test]
fn test_generation_id_display() {
let id = GenerationId::new("test-display");
assert_eq!(format!("{}", id), "test-display");
}
#[test]
fn test_generation_id_hash() {
use std::collections::HashSet;
let mut set = HashSet::new();
set.insert(GenerationId::new("id-1"));
set.insert(GenerationId::new("id-2"));
set.insert(GenerationId::new("id-1"));
        assert_eq!(set.len(), 2);
    }
}