use std::collections::HashMap;
use std::time::Duration;
use serde_json::{json, Value};
use spectracost::{openai::ChatRequest, anthropic::MessagesRequest, Options, Spectracost};
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
/// Polls the ingest mock until at least one telemetry event shows up,
/// giving up after 60 polls spaced 50 ms apart (~3 s total).
///
/// Each recorded request body is expected to be a JSON array of event
/// objects; bodies that fail to parse as such are skipped. Returns an
/// empty `Vec` on timeout so callers can assert with a clear message.
async fn received_events(ingest: &MockServer) -> Vec<Value> {
    let mut polls_remaining = 60;
    while polls_remaining > 0 {
        let batch: Vec<Value> = ingest
            .received_requests()
            .await
            .unwrap_or_default()
            .iter()
            // Ignore unparseable bodies rather than failing the poll loop.
            .filter_map(|req| serde_json::from_slice::<Vec<Value>>(&req.body).ok())
            .flatten()
            .collect();
        if !batch.is_empty() {
            return batch;
        }
        polls_remaining -= 1;
        tokio::time::sleep(Duration::from_millis(50)).await;
    }
    Vec::new()
}
/// End-to-end: one chat completion against the OpenAI-compatible mock must
/// emit exactly the expected telemetry to the ingest endpoint — provider,
/// model, endpoint label, token counts, and the configured team/service.
#[tokio::test]
async fn openai_chat_emits_telemetry() {
    // Two independent mocks: one plays the OpenAI API, one the ingest sink.
    let ingest = MockServer::start().await;
    let openai = MockServer::start().await;

    // Ingest accepts anything POSTed to /v1/events.
    Mock::given(method("POST"))
        .and(path("/v1/events"))
        .respond_with(ResponseTemplate::new(202))
        .mount(&ingest)
        .await;

    // Canned chat completion with a usage block the SDK should report.
    let completion = json!({
        "id": "chatcmpl_test",
        "model": "gpt-4o-mini",
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": "hi"},
            "finish_reason": "stop"
        }],
        "usage": {
            "prompt_tokens": 120,
            "completion_tokens": 45,
            "total_tokens": 165
        }
    });
    Mock::given(method("POST"))
        .and(path("/v1/chat/completions"))
        .respond_with(ResponseTemplate::new(200).set_body_json(completion))
        .mount(&openai)
        .await;

    let sc = Spectracost::new(Options {
        api_key: "sprc_test".into(),
        endpoint: Some(ingest.uri()),
        openai_api_key: Some("sk-test".into()),
        openai_base_url: Some(openai.uri()),
        team: Some("search".into()),
        service: Some("query-rewriter".into()),
        ..Default::default()
    });

    let reply = sc
        .openai_chat(ChatRequest {
            model: "gpt-4o-mini".into(),
            messages: vec![("user", "hello").into()],
            max_tokens: Some(50),
        })
        .await
        .expect("openai_chat ok");
    assert_eq!(reply.first_message().as_deref(), Some("hi"));

    // The telemetry event must mirror the mock's usage numbers and the
    // attribution configured on the client.
    let received = received_events(&ingest).await;
    assert!(!received.is_empty(), "expected at least one event");
    let event = &received[0];
    assert_eq!(event["provider"], "openai");
    assert_eq!(event["model"], "gpt-4o-mini");
    assert_eq!(event["endpoint"], "chat.completions");
    assert_eq!(event["input_tokens"], 120);
    assert_eq!(event["output_tokens"], 45);
    assert_eq!(event["team"], "search");
    assert_eq!(event["service"], "query-rewriter");
}
#[tokio::test]
async fn openai_chat_detects_groq_from_base_url() {
let mock = MockServer::start().await;
let ingest = MockServer::start().await;
Mock::given(method("POST"))
.and(path("/v1/chat/completions"))
.respond_with(ResponseTemplate::new(200).set_body_json(json!({
"choices": [{"message": {"role": "assistant", "content": "hi"}}],
"usage": {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
})))
.mount(&mock)
.await;
Mock::given(method("POST"))
.and(path("/v1/events"))
.respond_with(ResponseTemplate::new(202))
.mount(&ingest)
.await;
let _ = (mock, ingest);
}
/// End-to-end: one Anthropic messages call against the mock must emit a
/// telemetry event carrying provider, endpoint label, token counts, the
/// configured team, and any custom tags.
#[tokio::test]
async fn anthropic_messages_emits_telemetry() {
    // One mock plays the Anthropic API, the other the ingest sink.
    let ingest = MockServer::start().await;
    let anthropic = MockServer::start().await;

    Mock::given(method("POST"))
        .and(path("/v1/events"))
        .respond_with(ResponseTemplate::new(202))
        .mount(&ingest)
        .await;

    let message_body = json!({
        "id": "msg_test",
        "model": "claude-haiku-4-5-20251001",
        "content": [{"type": "text", "text": "hi"}],
        "usage": {"input_tokens": 200, "output_tokens": 80}
    });
    Mock::given(method("POST"))
        .and(path("/v1/messages"))
        .respond_with(ResponseTemplate::new(200).set_body_json(message_body))
        .mount(&anthropic)
        .await;

    // Custom tags should be forwarded verbatim on the telemetry event.
    let tags: HashMap<String, String> =
        HashMap::from([("origin".to_string(), "test".to_string())]);

    let sc = Spectracost::new(Options {
        api_key: "sprc_test".into(),
        endpoint: Some(ingest.uri()),
        anthropic_api_key: Some("sk-ant-test".into()),
        anthropic_base_url: Some(anthropic.uri()),
        team: Some("support".into()),
        tags: Some(tags),
        ..Default::default()
    });

    let reply = sc
        .anthropic_messages(MessagesRequest {
            model: "claude-haiku-4-5-20251001".into(),
            max_tokens: 50,
            messages: vec![("user", "hello").into()],
        })
        .await
        .expect("anthropic ok");
    assert_eq!(reply.first_text().as_deref(), Some("hi"));

    let received = received_events(&ingest).await;
    assert!(!received.is_empty());
    let event = &received[0];
    assert_eq!(event["provider"], "anthropic");
    assert_eq!(event["endpoint"], "messages");
    assert_eq!(event["input_tokens"], 200);
    assert_eq!(event["output_tokens"], 80);
    assert_eq!(event["team"], "support");
    assert_eq!(event["tags"]["origin"], "test");
}