use crate::adapter::adapters::support::get_api_key;
use crate::adapter::anthropic::AnthropicStreamer;
use crate::adapter::{Adapter, AdapterKind, ServiceType, WebRequestData};
use crate::chat::{
Binary, BinarySource, CacheControl, CacheCreationDetails, ChatOptionsSet, ChatRequest, ChatResponse,
ChatResponseFormat, ChatRole, ChatStream, ChatStreamResponse, ContentPart, MessageContent, PromptTokensDetails,
ReasoningEffort, StopReason, Tool, ToolCall, ToolConfig, ToolName, Usage,
};
use crate::resolver::{AuthData, Endpoint};
use crate::webc::{EventSourceStream, WebResponse};
use crate::{Headers, ModelIden};
use crate::{Result, ServiceTarget};
use reqwest::RequestBuilder;
use serde_json::{Map, Value, json};
use tracing::info;
use tracing::warn;
use value_ext::JsonValueExt;
/// Adapter for the Anthropic Messages API (chat + chat-stream; embeddings unsupported).
pub struct AnthropicAdapter;
// Token budgets for the legacy `thinking.budget_tokens` field, mapped from the
// coarse `ReasoningEffort` levels (used for models without `output_config.effort`).
const REASONING_LOW: u32 = 1024;
const REASONING_MEDIUM: u32 = 8000;
const REASONING_HIGH: u32 = 24000;

// Model-name fragments (substring match) that accept `output_config.effort`.
const SUPPORT_EFFORT_MODELS: &[&str] = &["claude-opus-4-6", "claude-sonnet-4-6", "claude-opus-4-5"];
// Models that additionally accept `effort: "max"` (others clamp Max/XHigh to "high").
const SUPPORT_REASONING_MAX_MODELS: &[&str] = &["claude-opus-4-6"];
// Models that accept `thinking: {"type": "adaptive"}`.
// (Renamed from the misspelled `SUPPORT_ADAPTTIVE_THINK_MODELS`; private to this file.)
const SUPPORT_ADAPTIVE_THINK_MODELS: &[&str] = &["claude-opus-4-6", "claude-sonnet-4-6"];

/// Returns true when `model_name` contains any of the given fragments.
/// Substring (not strict prefix) match, so provider-prefixed names still match.
fn has_model(model_prefixes: &[&str], model_name: &str) -> bool {
	model_prefixes.iter().any(|prefix| model_name.contains(prefix))
}

/// Inserts the reasoning-related fields for `model_name`/`effort` into the request:
/// the `thinking` object goes into `payload`, the `effort` key into `output_config`.
///
/// Behavior by model capability:
/// - Effort-capable models get `output_config.effort` ("low"/"medium"/"high", or "max"
///   when the model supports it; `Max`/`XHigh` clamp to "high" otherwise).
/// - Adaptive-capable models also get `thinking: {"type": "adaptive"}`, with
///   `budget_tokens` when `ReasoningEffort::Budget` was given.
/// - All other models get the legacy `thinking: {"type": "enabled", budget_tokens}`
///   form, with the budget derived from the effort level (`ReasoningEffort::None`
///   sends nothing).
///
/// NOTE(review): for an effort-capable but non-adaptive model (e.g. "claude-opus-4-5"),
/// a `ReasoningEffort::Budget(..)` value is silently dropped (no effort, no thinking) —
/// confirm this is intended.
fn insert_anthropic_reasoning(
	payload: &mut Value,
	output_config: &mut Map<String, Value>,
	model_name: &str,
	effort: &ReasoningEffort,
) -> Result<()> {
	// Captured from a `Budget(..)` effort; consumed by the adaptive-thinking block below.
	let mut budget: Option<u32> = None;

	let support_effort = has_model(SUPPORT_EFFORT_MODELS, model_name);
	let support_reasoning_max = has_model(SUPPORT_REASONING_MAX_MODELS, model_name);
	let support_adaptive = has_model(SUPPORT_ADAPTIVE_THINK_MODELS, model_name);

	if support_effort {
		let effort = match effort {
			ReasoningEffort::Minimal => "low",
			ReasoningEffort::Low => "low",
			ReasoningEffort::Medium => "medium",
			ReasoningEffort::High => "high",
			// Guard arm must come first: only reasoning-max models may send "max".
			ReasoningEffort::Max | ReasoningEffort::XHigh if support_reasoning_max => "max",
			ReasoningEffort::XHigh => "high",
			ReasoningEffort::Max => "high",
			ReasoningEffort::Budget(val) => {
				// Budget has no effort keyword; carry the value to the adaptive block.
				budget = Some(*val);
				""
			}
			ReasoningEffort::None => "",
		};
		// Empty string means "do not send an effort field".
		if !effort.is_empty() {
			output_config.insert("effort".to_string(), json!(effort));
		}
	}

	if support_adaptive {
		let thinking = match budget {
			Some(budget) => json!({
				"type": "adaptive",
				"budget_tokens": budget
			}),
			None => json!({
				"type": "adaptive"
			}),
		};
		payload.x_insert("thinking", thinking)?;
	}

	// Legacy models: explicit enabled/budget thinking derived from the effort level.
	if !support_effort {
		let thinking_budget = match effort {
			ReasoningEffort::None => None,
			ReasoningEffort::Budget(budget) => Some(*budget),
			ReasoningEffort::Low | ReasoningEffort::Minimal => Some(REASONING_LOW),
			ReasoningEffort::Medium => Some(REASONING_MEDIUM),
			ReasoningEffort::High | ReasoningEffort::Max | ReasoningEffort::XHigh => Some(REASONING_HIGH),
		};
		if let Some(thinking_budget) = thinking_budget {
			payload.x_insert(
				"thinking",
				json!({
					"type": "enabled",
					"budget_tokens": thinking_budget
				}),
			)?;
		}
	}

	Ok(())
}
// Default `max_tokens` ceilings by model family (see `resolve_max_tokens`).
pub(in crate::adapter) const MAX_TOKENS_64K: u32 = 64000;
pub(in crate::adapter) const MAX_TOKENS_32K: u32 = 32000;
pub(in crate::adapter) const MAX_TOKENS_8K: u32 = 8192;
pub(in crate::adapter) const MAX_TOKENS_4K: u32 = 4096;

// Value for the required `anthropic-version` header.
const ANTHROPIC_VERSION: &str = "2023-06-01";
impl AnthropicAdapter {
	/// Default environment variable name for the Anthropic API key.
	pub const API_KEY_DEFAULT_ENV_NAME: &str = "ANTHROPIC_API_KEY";

	/// Lists the model ids available at the given endpoint via `GET {base_url}models`.
	///
	/// The API key is optional here: when no single key can be resolved from `auth`,
	/// the request is sent without auth headers (the server error then surfaces to the caller).
	pub(in crate::adapter) async fn list_model_names_for_end_target(
		kind: AdapterKind,
		endpoint: Endpoint,
		auth: AuthData,
	) -> Result<Vec<String>> {
		let base_url = endpoint.base_url();
		let url = format!("{base_url}models");

		// -- Build the auth headers (empty Headers when no key is available)
		let api_key = auth.single_key_value().ok();
		let headers = api_key
			.map(|api_key| {
				Headers::from(vec![
					("x-api-key".to_string(), api_key),
					("anthropic-version".to_string(), ANTHROPIC_VERSION.to_string()),
				])
			})
			.unwrap_or_default();

		let web_c = crate::webc::WebClient::default();
		let mut res = web_c
			.do_get(&url, &headers)
			.await
			.map_err(|webc_error| crate::Error::WebAdapterCall {
				adapter_kind: kind,
				webc_error,
			})?;

		// -- Collect the `id` of each entry of the response `data` array
		let mut models: Vec<String> = Vec::new();
		if let Value::Array(models_value) = res.body.x_take("data")? {
			for mut model in models_value {
				let model_name: String = model.x_take("id")?;
				models.push(model_name);
			}
		}
		Ok(models)
	}
}
impl Adapter for AnthropicAdapter {
	const DEFAULT_API_KEY_ENV_NAME: Option<&'static str> = Some(Self::API_KEY_DEFAULT_ENV_NAME);

	/// Default Anthropic API endpoint.
	fn default_endpoint() -> Endpoint {
		const BASE_URL: &str = "https://api.anthropic.com/v1/";
		Endpoint::from_static(BASE_URL)
	}

	/// Default auth: read the API key from the default env var when one is defined.
	fn default_auth() -> AuthData {
		match Self::DEFAULT_API_KEY_ENV_NAME {
			Some(env_name) => AuthData::from_env(env_name),
			None => AuthData::None,
		}
	}

	async fn all_model_names(kind: AdapterKind, endpoint: Endpoint, auth: AuthData) -> Result<Vec<String>> {
		Self::list_model_names_for_end_target(kind, endpoint, auth).await
	}

	/// Builds the service URL for the given service type.
	/// Note: an `embeddings` URL is produced for `ServiceType::Embed` even though
	/// the embed entry points below return `AdapterNotSupported`.
	fn get_service_url(_model: &ModelIden, service_type: ServiceType, endpoint: Endpoint) -> Result<String> {
		let base_url = endpoint.base_url();
		let url = match service_type {
			ServiceType::Chat | ServiceType::ChatStream => format!("{base_url}messages"),
			ServiceType::Embed => format!("{base_url}embeddings"),
		};
		Ok(url)
	}

	/// Builds the full web request (url, headers, JSON payload) for a chat or
	/// chat-stream call against the Anthropic Messages API.
	fn to_web_request_data(
		target: ServiceTarget,
		service_type: ServiceType,
		chat_req: ChatRequest,
		options_set: ChatOptionsSet<'_, '_>,
	) -> Result<WebRequestData> {
		let ServiceTarget { endpoint, auth, model } = target;

		// -- api_key
		let api_key = get_api_key(auth, &model)?;

		// -- url & headers
		let url = Self::get_service_url(&model, service_type, endpoint)?;
		let headers = Headers::from(vec![
			("x-api-key".to_string(), api_key),
			("anthropic-version".to_string(), ANTHROPIC_VERSION.to_string()),
		]);

		// -- parts (system / messages / tools)
		let AnthropicRequestParts {
			system,
			messages,
			tools,
		} = Self::into_anthropic_request_parts(chat_req)?;

		// -- Resolve model name & reasoning effort.
		// When no explicit effort is set in options, a reasoning suffix on the model
		// name (e.g. "claude-sonnet-4-6-high") is stripped and mapped to an effort.
		let (_, raw_model_name) = model.model_name.namespace_and_name();
		let (model_name, computed_reasoning_effort) = match (raw_model_name, options_set.reasoning_effort()) {
			(model, None) => {
				if let Some((prefix, last)) = raw_model_name.rsplit_once('-') {
					let reasoning = match last {
						// "zero" disables reasoning entirely.
						"zero" => None,
						// NOTE(review): suffix "None" maps to Low (not disabled) — confirm intended.
						"None" => Some(ReasoningEffort::Low),
						"minimal" => Some(ReasoningEffort::Low),
						"low" => Some(ReasoningEffort::Low),
						"medium" => Some(ReasoningEffort::Medium),
						"high" => Some(ReasoningEffort::High),
						"xhigh" => Some(ReasoningEffort::XHigh),
						"max" => Some(ReasoningEffort::Max),
						_ => None,
					};
					// Strip the suffix only when it was recognized as a reasoning level.
					let model = if reasoning.is_some() { prefix } else { model };
					(model, reasoning)
				} else {
					(model, None)
				}
			}
			// Explicit option always wins; model name is left untouched.
			(model, Some(effort)) => (model, Some(effort.clone())),
		};

		// -- Base payload
		let stream = matches!(service_type, ServiceType::ChatStream);
		let mut payload = json!({
			"model": model_name.to_string(),
			"messages": messages,
			"stream": stream
		});

		if let Some(system) = system {
			payload.x_insert("system", system)?;
		}
		if let Some(tools) = tools {
			// JSON-pointer form of insert.
			payload.x_insert("/tools", tools)?;
		}

		// -- `output_config` accumulates both reasoning effort and response format.
		let mut output_config: Map<String, Value> = Map::new();
		if let Some(computed_reasoning_effort) = computed_reasoning_effort {
			insert_anthropic_reasoning(&mut payload, &mut output_config, model_name, &computed_reasoning_effort)?;
		}

		// Request-level cache_control is intentionally a no-op for Anthropic
		// (caching is per-message); log so the caller can see it was ignored.
		if let Some(cache_control) = options_set.cache_control() {
			info!(
				"Anthropic request-level cache_control '{cache_control:?}' is currently ignored. Use message-level cache_control instead."
			);
		}

		// -- Structured output via output_config.format (JSON schema)
		if let Some(ChatResponseFormat::JsonSpec(st_json)) = options_set.response_format() {
			output_config.insert(
				"format".to_string(),
				json!({
					"type": "json_schema",
					"schema": st_json.schema_with_additional_properties_false(),
				}),
			);
		}

		if !output_config.is_empty() {
			payload.x_insert("output_config", Value::Object(output_config))?;
		}

		// -- Sampling / limits options
		if let Some(temperature) = options_set.temperature() {
			payload.x_insert("temperature", temperature)?;
		}
		if !options_set.stop_sequences().is_empty() {
			payload.x_insert("stop_sequences", options_set.stop_sequences())?;
		}
		// `max_tokens` is mandatory for Anthropic; fall back to per-family defaults.
		let max_tokens = Self::resolve_max_tokens(model_name, &options_set);
		payload.x_insert("max_tokens", max_tokens)?;
		if let Some(top_p) = options_set.top_p() {
			payload.x_insert("top_p", top_p)?;
		}

		Ok(WebRequestData { url, headers, payload })
	}

	/// Parses a (non-streaming) Messages API response body into a `ChatResponse`.
	fn to_chat_response(
		model_iden: ModelIden,
		web_response: WebResponse,
		_options_set: ChatOptionsSet<'_, '_>,
	) -> Result<ChatResponse> {
		let WebResponse { mut body, .. } = web_response;

		// -- Provider-reported model name (may differ from the requested one)
		let provider_model_name: Option<String> = body.x_remove("model").ok();
		let provider_model_iden = model_iden.from_optional_name(provider_model_name);

		// -- Usage (missing usage falls back to default/zeroed Usage)
		let usage = body.x_take::<Value>("usage");
		let usage = usage.map(Self::into_usage).unwrap_or_default();

		let stop_reason = body
			.x_take::<Option<String>>("stop_reason")
			.ok()
			.flatten()
			.map(StopReason::from);

		// -- Walk the content blocks: text, thinking, tool_use, or custom passthrough
		let mut content: MessageContent = MessageContent::default();
		let json_content_items: Vec<Value> = body.x_take("content")?;
		let mut reasoning_content: Vec<String> = Vec::new();
		for mut item in json_content_items {
			let typ: String = item.x_take("type")?;
			match typ.as_ref() {
				"text" => {
					let part = ContentPart::from_text(item.x_take::<String>("text")?);
					content.push(part);
				}
				"thinking" => reasoning_content.push(item.x_take("thinking")?),
				"tool_use" => {
					let call_id = item.x_take::<String>("id")?;
					let fn_name = item.x_take::<String>("name")?;
					// Missing/invalid `input` is tolerated as Value::Null default.
					let fn_arguments = item.x_take::<Value>("input").unwrap_or_default();
					let tool_call = ToolCall {
						call_id,
						fn_name,
						fn_arguments,
						thought_signatures: None,
					};
					let part = ContentPart::ToolCall(tool_call);
					content.push(part);
				}
				other_typ => {
					// Unknown block types are preserved as custom parts (with their `type` restored).
					item.x_insert("type", other_typ)?;
					content.push(ContentPart::from_custom(item, Some(model_iden.clone())))
				}
			}
		}

		// All thinking blocks are joined into a single reasoning_content string.
		let reasoning_content = if !reasoning_content.is_empty() {
			Some(reasoning_content.join("\n"))
		} else {
			None
		};

		Ok(ChatResponse {
			content,
			reasoning_content,
			model_iden,
			provider_model_iden,
			stop_reason,
			usage,
			captured_raw_body: None,
			response_id: None,
		})
	}

	/// Wraps the pending request into an SSE-backed `ChatStreamResponse`.
	fn to_chat_stream(
		model_iden: ModelIden,
		reqwest_builder: RequestBuilder,
		options_set: ChatOptionsSet<'_, '_>,
	) -> Result<ChatStreamResponse> {
		let event_source = EventSourceStream::new(reqwest_builder);
		let anthropic_stream = AnthropicStreamer::new(event_source, model_iden.clone(), options_set);
		let chat_stream = ChatStream::from_inter_stream(anthropic_stream);
		Ok(ChatStreamResponse {
			model_iden,
			stream: chat_stream,
		})
	}

	/// Embeddings are not supported by the Anthropic API.
	fn to_embed_request_data(
		_service_target: crate::ServiceTarget,
		_embed_req: crate::embed::EmbedRequest,
		_options_set: crate::embed::EmbedOptionsSet<'_, '_>,
	) -> Result<crate::adapter::WebRequestData> {
		Err(crate::Error::AdapterNotSupported {
			adapter_kind: crate::adapter::AdapterKind::Anthropic,
			feature: "embeddings".to_string(),
		})
	}

	/// Embeddings are not supported by the Anthropic API.
	fn to_embed_response(
		_model_iden: crate::ModelIden,
		_web_response: crate::webc::WebResponse,
		_options_set: crate::embed::EmbedOptionsSet<'_, '_>,
	) -> Result<crate::embed::EmbedResponse> {
		Err(crate::Error::AdapterNotSupported {
			adapter_kind: crate::adapter::AdapterKind::Anthropic,
			feature: "embeddings".to_string(),
		})
	}
}
/// Support functions for the Anthropic adapter.
impl AnthropicAdapter {
	/// Resolves the `max_tokens` to send: the user-provided value, or a per-family default.
	/// Branch order matters: "claude-opus-4-5" must be checked before "claude-opus-4".
	pub(in crate::adapter) fn resolve_max_tokens(model_name: &str, options_set: &ChatOptionsSet) -> u32 {
		options_set.max_tokens().unwrap_or_else(|| {
			if model_name.contains("claude-sonnet")
				|| model_name.contains("claude-haiku")
				|| model_name.contains("claude-3-7-sonnet")
				|| model_name.contains("claude-opus-4-5")
			{
				MAX_TOKENS_64K
			} else if model_name.contains("claude-opus-4") {
				MAX_TOKENS_32K
			} else if model_name.contains("claude-3-5") {
				MAX_TOKENS_8K
			} else if model_name.contains("3-opus") || model_name.contains("3-haiku") {
				MAX_TOKENS_4K
			} else {
				// Unknown / newer models default to the largest ceiling.
				MAX_TOKENS_64K
			}
		})
	}

	/// Converts the Anthropic `usage` object into the normalized `Usage`.
	///
	/// Anthropic reports `input_tokens` excluding cache reads/writes; here
	/// `prompt_tokens` is normalized to include them (OpenAI-style totals), with
	/// the cache breakdown preserved in `prompt_tokens_details`.
	pub(in crate::adapter) fn into_usage(mut usage_value: Value) -> Usage {
		let input_tokens: i32 = usage_value.x_take("input_tokens").ok().unwrap_or(0);
		let cache_creation_input_tokens: i32 = usage_value.x_take("cache_creation_input_tokens").unwrap_or(0);
		let cache_read_input_tokens: i32 = usage_value.x_take("cache_read_input_tokens").unwrap_or(0);
		let completion_tokens: i32 = usage_value.x_take("output_tokens").ok().unwrap_or(0);

		// Per-TTL breakdown of the cache write, when present.
		let cache_creation_details = usage_value.get("cache_creation").and_then(parse_cache_creation_details);

		let prompt_tokens = input_tokens + cache_creation_input_tokens + cache_read_input_tokens;
		let total_tokens = prompt_tokens + completion_tokens;

		// Details are attached only when any cache activity was reported.
		let prompt_tokens_details =
			if cache_creation_input_tokens > 0 || cache_read_input_tokens > 0 || cache_creation_details.is_some() {
				Some(PromptTokensDetails {
					cache_creation_tokens: Some(cache_creation_input_tokens),
					cache_creation_details,
					cached_tokens: Some(cache_read_input_tokens),
					audio_tokens: None,
				})
			} else {
				None
			};

		Usage {
			prompt_tokens: Some(prompt_tokens),
			prompt_tokens_details,
			completion_tokens: Some(completion_tokens),
			completion_tokens_details: None,
			total_tokens: Some(total_tokens),
		}
	}

	/// Converts a `ChatRequest` into the Anthropic request parts:
	/// the top-level `system` value, the `messages` array, and the `tools` array.
	///
	/// Notable behaviors:
	/// - System messages (and `chat_req.system`) are collected into the `system` field.
	/// - Tool responses become `tool_result` blocks inside a `user`-role message.
	/// - Assistant `ToolCall` parts become `tool_use` blocks.
	/// - Message-level cache_control is attached to the last content part of the message.
	pub(in crate::adapter) fn into_anthropic_request_parts(chat_req: ChatRequest) -> Result<AnthropicRequestParts> {
		let mut messages: Vec<Value> = Vec::new();
		let mut systems: Vec<(String, Option<CacheControl>)> = Vec::new();
		// Tracks TTL ordering: Anthropic requires 1h cache entries before 5m ones.
		let mut seen_5m_cache = false;

		if let Some(system) = chat_req.system {
			systems.push((system, None));
		}

		for msg in chat_req.messages {
			let cache_control = msg.options.and_then(|o| o.cache_control);

			// -- Warn on TTL ordering violations (request is still sent as-is)
			if let Some(ref cc) = cache_control {
				match cc {
					CacheControl::Memory | CacheControl::Ephemeral | CacheControl::Ephemeral5m => {
						seen_5m_cache = true;
					}
					CacheControl::Ephemeral1h | CacheControl::Ephemeral24h => {
						if seen_5m_cache {
							warn!(
								"Anthropic cache TTL ordering violation: Ephemeral1h appears after Ephemeral/Ephemeral5m. \
								1-hour cache entries must appear before 5-minute cache entries. \
								See: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#mixing-different-ttls"
							);
						}
					}
				}
			}

			match msg.role {
				ChatRole::System => {
					// Only the joined text of a system message is kept.
					if let Some(system_text) = msg.content.joined_texts() {
						systems.push((system_text, cache_control));
					}
				}
				ChatRole::User => {
					if msg.content.is_text_only() {
						// Text-only content can stay a plain string (or a single cached text part).
						let text = msg.content.joined_texts().unwrap_or_else(String::new);
						let content = apply_cache_control_to_text(cache_control.as_ref(), text);
						messages.push(json!({"role": "user", "content": content}));
					} else {
						// Mixed content becomes an array of typed parts.
						let mut values: Vec<Value> = Vec::new();
						for part in msg.content {
							match part {
								ContentPart::Text(text) => {
									values.push(json!({"type": "text", "text": text}));
								}
								ContentPart::Binary(binary) => {
									let is_image = binary.is_image();
									let Binary {
										content_type, source, ..
									} = binary;
									if is_image {
										match &source {
											BinarySource::Url(_) => {
												// URL images are skipped (not supported by Anthropic).
												warn!(
													"Anthropic doesn't support images from URL, need to handle it gracefully"
												);
											}
											BinarySource::Base64(content) => {
												values.push(json!({
													"type": "image",
													"source": {
														"type": "base64",
														"media_type": content_type,
														"data": content,
													}
												}));
											}
										}
									} else {
										// Non-image binaries are sent as documents (URL or base64).
										match &source {
											BinarySource::Url(url) => {
												values.push(json!({
													"type": "document",
													"source": {
														"type": "url",
														"url": url,
													}
												}));
											}
											BinarySource::Base64(b64) => {
												values.push(json!({
													"type": "document",
													"source": {
														"type": "base64",
														"media_type": content_type,
														"data": b64,
													}
												}));
											}
										}
									}
								}
								// Tool calls are not valid inside a user message; dropped.
								ContentPart::ToolCall(_tc) => {}
								ContentPart::ToolResponse(tool_response) => {
									values.push(json!({
										"type": "tool_result",
										"content": tool_response.content,
										"tool_use_id": tool_response.call_id,
									}));
								}
								ContentPart::ThoughtSignature(_) => {}
								ContentPart::ReasoningContent(_) => {}
								ContentPart::Custom(_) => {}
							}
						}
						let values = apply_cache_control_to_parts(cache_control.as_ref(), values);
						messages.push(json!({"role": "user", "content": values}));
					}
				}
				ChatRole::Assistant => {
					let mut values: Vec<Value> = Vec::new();
					let mut has_tool_use = false;
					let mut has_text = false;
					for part in msg.content {
						match part {
							ContentPart::Text(text) => {
								has_text = true;
								values.push(json!({"type": "text", "text": text}));
							}
							ContentPart::ToolCall(tool_call) => {
								has_tool_use = true;
								// Anthropic requires an object for `input`; null becomes {}.
								let input = if tool_call.fn_arguments.is_null() {
									Value::Object(Map::new())
								} else {
									tool_call.fn_arguments
								};
								values.push(json!({
									"type": "tool_use",
									"id": tool_call.call_id,
									"name": tool_call.fn_name,
									"input": input,
								}));
							}
							// These part kinds are not representable in an assistant message; dropped.
							ContentPart::Binary(_) => {}
							ContentPart::ToolResponse(_) => {}
							ContentPart::ThoughtSignature(_) => {}
							ContentPart::ReasoningContent(_) => {}
							ContentPart::Custom(_) => {}
						}
					}
					// Single plain-text assistant message without cache control can be
					// sent as a bare string; everything else stays an array of parts.
					if !has_tool_use && has_text && cache_control.is_none() && values.len() == 1 {
						let text = values
							.first()
							.and_then(|v| v.get("text"))
							.and_then(|v| v.as_str())
							.unwrap_or_default()
							.to_string();
						let content = apply_cache_control_to_text(None, text);
						messages.push(json!({"role": "assistant", "content": content}));
					} else {
						let values = apply_cache_control_to_parts(cache_control.as_ref(), values);
						messages.push(json!({"role": "assistant", "content": values}));
					}
				}
				ChatRole::Tool => {
					// Tool results are sent back as a user-role message of tool_result blocks.
					let mut values: Vec<Value> = Vec::new();
					for part in msg.content {
						if let ContentPart::ToolResponse(tool_response) = part {
							values.push(json!({
								"type": "tool_result",
								"content": tool_response.content,
								"tool_use_id": tool_response.call_id,
							}));
						}
					}
					if !values.is_empty() {
						let values = apply_cache_control_to_parts(cache_control.as_ref(), values);
						messages.push(json!({"role": "user", "content": values}));
					}
				}
			}
		}

		// -- Assemble the `system` field.
		// Plain joined string when no entry has cache_control; otherwise an array
		// of text parts so the cache_control markers can be attached.
		let system = if !systems.is_empty() {
			let has_any_cache = systems.iter().any(|(_, cc)| cc.is_some());
			let system: Value = if has_any_cache {
				let parts: Vec<Value> = systems
					.iter()
					.map(|(content, cc)| {
						if let Some(cc) = cc {
							json!({"type": "text", "text": content, "cache_control": cache_control_to_json(cc)})
						} else {
							json!({"type": "text", "text": content})
						}
					})
					.collect();
				json!(parts)
			} else {
				let content_buff = systems.iter().map(|(content, _)| content.as_str()).collect::<Vec<&str>>();
				let content = content_buff.join("\n\n");
				json!(content)
			};
			Some(system)
		} else {
			None
		};

		// -- Tools
		let tools: Option<Vec<Value>> = chat_req
			.tools
			.map(|tools| {
				tools
					.into_iter()
					.map(Self::tool_to_anthropic_tool)
					.collect::<Result<Vec<Value>>>()
			})
			.transpose()?;

		Ok(AnthropicRequestParts {
			system,
			messages,
			tools,
		})
	}

	/// Converts a library `Tool` into an Anthropic tool definition.
	///
	/// Built-in tools (currently only `web_search`) get a provider `type` plus their
	/// specific config fields; custom tools get `input_schema` and `description`.
	fn tool_to_anthropic_tool(tool: Tool) -> Result<Value> {
		let Tool {
			name,
			description,
			schema,
			config,
			..
		} = tool;

		let name = match name {
			ToolName::WebSearch => "web_search".to_string(),
			ToolName::Custom(name) => name,
		};

		let mut tool_value = json!({"name": name});

		// Map known built-in tool names to their versioned provider type.
		#[allow(clippy::single_match)]
		match name.as_str() {
			"web_search" => {
				tool_value.x_insert("type", "web_search_20250305")?;
			}
			_ => (),
		}

		// Presence of `type` distinguishes built-in tools from custom function tools.
		if tool_value.get("type").is_some() {
			if let Some(config) = config {
				match config {
					ToolConfig::WebSearch(config) => {
						// Best-effort inserts: failures are intentionally ignored.
						if let Some(max_uses) = config.max_uses {
							let _ = tool_value.x_insert("max_uses", max_uses);
						}
						if let Some(allowed_domains) = config.allowed_domains {
							let _ = tool_value.x_insert("allowed_domains", allowed_domains);
						}
						if let Some(blocked_domains) = config.blocked_domains {
							let _ = tool_value.x_insert("blocked_domains", blocked_domains);
						}
					}
					ToolConfig::Custom(config) => {
						tool_value.x_merge(config)?;
					}
				}
			}
		} else {
			tool_value.x_insert("input_schema", schema)?;
			if let Some(description) = description {
				let _ = tool_value.x_insert("description", description);
			}
		}

		Ok(tool_value)
	}
}
/// Maps the library's `CacheControl` to Anthropic's `cache_control` JSON object.
///
/// `Memory` is expressed as a plain ephemeral entry, and `Ephemeral24h` is clamped
/// down to the 1-hour TTL (the longest TTL Anthropic accepts).
fn cache_control_to_json(cache_control: &CacheControl) -> Value {
	match cache_control {
		CacheControl::Ephemeral | CacheControl::Memory => json!({"type": "ephemeral"}),
		CacheControl::Ephemeral5m => json!({"type": "ephemeral", "ttl": "5m"}),
		CacheControl::Ephemeral1h | CacheControl::Ephemeral24h => json!({"type": "ephemeral", "ttl": "1h"}),
	}
}
pub(super) fn parse_cache_creation_details(cache_creation: &Value) -> Option<CacheCreationDetails> {
let ephemeral_5m_tokens = cache_creation
.get("ephemeral_5m_input_tokens")
.and_then(|v| v.as_i64())
.map(|v| v as i32);
let ephemeral_1h_tokens = cache_creation
.get("ephemeral_1h_input_tokens")
.and_then(|v| v.as_i64())
.map(|v| v as i32);
if ephemeral_5m_tokens.is_some() || ephemeral_1h_tokens.is_some() {
Some(CacheCreationDetails {
ephemeral_5m_tokens,
ephemeral_1h_tokens,
})
} else {
None
}
}
/// Renders text-only message content, attaching `cache_control` when present.
///
/// Without cache control the content stays a plain JSON string; with it, the text
/// is wrapped into a one-element parts array so the marker has a place to live.
fn apply_cache_control_to_text(cache_control: Option<&CacheControl>, content: String) -> Value {
	match cache_control {
		Some(cc) => {
			let part = json!({"type": "text", "text": content, "cache_control": cache_control_to_json(cc)});
			json!([part])
		}
		None => json!(content),
	}
}
/// Attaches `cache_control` to the last content part, per Anthropic's convention
/// that the marker caches everything up to and including that block.
///
/// No-op when there is no cache control or `parts` is empty.
fn apply_cache_control_to_parts(cache_control: Option<&CacheControl>, mut parts: Vec<Value>) -> Vec<Value> {
	// `last_mut()` replaces the previous `!is_empty()` + `get_mut(len - 1)` dance.
	if let Some(cc) = cache_control
		&& let Some(last_value) = parts.last_mut()
	{
		// Best-effort insert, same as before: a failure leaves the part unmarked.
		let _ = last_value.x_insert("cache_control", cache_control_to_json(cc));
	}
	parts
}
/// Intermediate request parts produced from a `ChatRequest`, ready to be assembled
/// into the Anthropic Messages API payload (see `into_anthropic_request_parts`).
pub(in crate::adapter) struct AnthropicRequestParts {
	// `system` value: a plain string, or an array of text parts when any carries cache_control.
	pub system: Option<Value>,
	// The `messages` array (user/assistant entries, incl. tool_result user messages).
	pub messages: Vec<Value>,
	// The `tools` array, when tools were provided.
	pub tools: Option<Vec<Value>>,
}
#[cfg(test)]
mod tests {
	use super::*;
	use crate::ServiceTarget;
	use crate::adapter::{Adapter, ServiceType};
	use crate::chat::{ChatOptions, ChatRequest, JsonSpec};
	use crate::resolver::AuthData;

	/// Both reasoning effort and JSON response format must land in the same
	/// `output_config` object (not overwrite each other).
	#[test]
	fn test_output_config_merges_effort_and_format() {
		let chat_options = ChatOptions {
			reasoning_effort: Some(ReasoningEffort::High),
			response_format: Some(ChatResponseFormat::JsonSpec(JsonSpec::new(
				"anthropic_ignores_name",
				json!({"type": "object", "properties": {}}),
			))),
			..Default::default()
		};
		let model_iden = ModelIden::new(AdapterKind::Anthropic, "claude-sonnet-4-6");
		let target = ServiceTarget {
			endpoint: AnthropicAdapter::default_endpoint(),
			auth: AuthData::from_single("test-key"),
			model: model_iden,
		};
		let options_set = ChatOptionsSet::default().with_chat_options(Some(&chat_options));
		let result = AnthropicAdapter::to_web_request_data(
			target,
			ServiceType::Chat,
			ChatRequest::from_user("hello"),
			options_set,
		);
		let web_req = result.expect("to_web_request_data should succeed");
		let output_config = web_req.payload.get("output_config").expect("output_config must be present");
		assert_eq!(
			output_config.get("effort").and_then(|v| v.as_str()),
			Some("high"),
			"effort must be present in output_config"
		);
		assert_eq!(
			output_config.get("format").and_then(|f| f.get("type")).and_then(|v| v.as_str()),
			Some("json_schema"),
			"format.type must be present in output_config"
		);
	}

	// -- cache_control_to_json mapping (incl. Memory aliasing and 24h clamping to 1h)

	#[test]
	fn test_cache_control_to_json_ephemeral() {
		let result = cache_control_to_json(&CacheControl::Ephemeral);
		assert_eq!(result, json!({"type": "ephemeral"}));
	}

	#[test]
	fn test_cache_control_to_json_ephemeral_5m() {
		let result = cache_control_to_json(&CacheControl::Ephemeral5m);
		assert_eq!(result, json!({"type": "ephemeral", "ttl": "5m"}));
	}

	#[test]
	fn test_cache_control_to_json_memory() {
		let result = cache_control_to_json(&CacheControl::Memory);
		assert_eq!(result, json!({"type": "ephemeral"}));
	}

	#[test]
	fn test_cache_control_to_json_ephemeral_1h() {
		let result = cache_control_to_json(&CacheControl::Ephemeral1h);
		assert_eq!(result, json!({"type": "ephemeral", "ttl": "1h"}));
	}

	#[test]
	fn test_cache_control_to_json_ephemeral_24h() {
		// 24h intentionally clamps to Anthropic's maximum supported TTL of 1h.
		let result = cache_control_to_json(&CacheControl::Ephemeral24h);
		assert_eq!(result, json!({"type": "ephemeral", "ttl": "1h"}));
	}

	// -- parse_cache_creation_details: present/absent field combinations

	#[test]
	fn test_parse_cache_creation_details_with_both_ttls() {
		let cache_creation = json!({
			"ephemeral_5m_input_tokens": 456,
			"ephemeral_1h_input_tokens": 100
		});
		let result = parse_cache_creation_details(&cache_creation);
		assert!(result.is_some());
		let details = result.unwrap();
		assert_eq!(details.ephemeral_5m_tokens, Some(456));
		assert_eq!(details.ephemeral_1h_tokens, Some(100));
	}

	#[test]
	fn test_parse_cache_creation_details_with_5m_only() {
		let cache_creation = json!({
			"ephemeral_5m_input_tokens": 456
		});
		let result = parse_cache_creation_details(&cache_creation);
		assert!(result.is_some());
		let details = result.unwrap();
		assert_eq!(details.ephemeral_5m_tokens, Some(456));
		assert_eq!(details.ephemeral_1h_tokens, None);
	}

	#[test]
	fn test_parse_cache_creation_details_with_1h_only() {
		let cache_creation = json!({
			"ephemeral_1h_input_tokens": 100
		});
		let result = parse_cache_creation_details(&cache_creation);
		assert!(result.is_some());
		let details = result.unwrap();
		assert_eq!(details.ephemeral_5m_tokens, None);
		assert_eq!(details.ephemeral_1h_tokens, Some(100));
	}

	#[test]
	fn test_parse_cache_creation_details_empty() {
		let cache_creation = json!({});
		let result = parse_cache_creation_details(&cache_creation);
		assert!(result.is_none());
	}
}