use super::convert::{convert_messages, convert_tools};
use crate::provider::CompletionRequest;
use serde_json::{Value, json};

/// Build the JSON request body for a Bedrock Converse API call.
pub fn build_converse_body(request: &CompletionRequest, model_id: &str) -> Value {
    let (mut system_parts, messages) = convert_messages(&request.messages);
    let tools = convert_tools(&request.tools);

    // Append a cache point after the system content so Bedrock can reuse the
    // cached prefix on subsequent requests.
    if prompt_cache_enabled() && supports_prompt_caching(model_id) && !system_parts.is_empty() {
        system_parts.push(json!({"cachePoint": {"type": "default"}}));
    }

    let mut body = json!({"messages": messages});
    if !system_parts.is_empty() {
        body["system"] = json!(system_parts);
    }

    let mut inference_config = json!({});
    inference_config["maxTokens"] = json!(request.max_tokens.unwrap_or(8192));

    let skip_temperature = model_id.to_ascii_lowercase().contains("claude-opus-4-7");
    if let Some(temp) = request.temperature {
        if skip_temperature {
            tracing::debug!(
                provider = "bedrock",
                model = %model_id,
                "Skipping temperature parameter (deprecated for this model)"
            );
        } else {
            inference_config["temperature"] = json!(temp);
        }
    }
    if let Some(top_p) = request.top_p {
        inference_config["topP"] = json!(top_p);
    }
    body["inferenceConfig"] = inference_config;

    if let Some(service_tier) = configured_service_tier() {
        tracing::debug!(
            provider = "bedrock",
            service_tier = %service_tier,
            "Applying Bedrock service tier override"
        );
        body["additionalModelRequestFields"] = json!({"service_tier": service_tier});
    }

    if !tools.is_empty() {
        body["toolConfig"] = json!({"tools": tools});
    }

    body
}

/// Read an optional service-tier override from the
/// CODETETHER_BEDROCK_SERVICE_TIER environment variable.
fn configured_service_tier() -> Option<String> {
    std::env::var("CODETETHER_BEDROCK_SERVICE_TIER")
        .ok()
        .map(|v| v.trim().to_ascii_lowercase())
        .filter(|v| !v.is_empty())
}

/// Prompt caching is on by default and can be disabled by setting
/// CODETETHER_BEDROCK_PROMPT_CACHE to "0", "false", "no", or "off".
fn prompt_cache_enabled() -> bool {
    match std::env::var("CODETETHER_BEDROCK_PROMPT_CACHE") {
        Ok(v) => !matches!(
            v.trim().to_ascii_lowercase().as_str(),
            "0" | "false" | "no" | "off"
        ),
        Err(_) => true,
    }
}

/// Only Anthropic Claude model IDs are treated as cache-capable here.
fn supports_prompt_caching(model_id: &str) -> bool {
    let id = model_id.to_ascii_lowercase();
    id.contains("anthropic") || id.contains("claude")
}