use super::{BaseProvider, ModelPricing, Provider, ProviderError, ProviderType};
use crate::config::ProviderConfig;
use crate::core::models::{RequestContext, openai::*};
use crate::utils::error::Result;
use async_trait::async_trait;
use serde_json::json;
use std::collections::HashMap;
use tracing::{debug, info};
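
/// Provider adapter for Cohere's v1 REST API, translating OpenAI-style
/// requests into Cohere `/generate` and `/embed` calls.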
#[derive(Debug, Clone)]
pub struct CohereProvider {
base: BaseProvider,
pricing_cache: HashMap<String, ModelPricing>,
}
impl CohereProvider {
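    /// Builds the provider, defaulting `base_url` to Cohere's public v1
    /// endpoint when the config leaves it unset.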
pub async fn new(config: &ProviderConfig) -> Result<Self> {
let base = BaseProvider::new(config)?;
let base_url = config
.base_url
.clone()
.unwrap_or_else(|| "https://api.cohere.ai/v1".to_string());
let provider = Self {
base: BaseProvider { base_url, ..base },
pricing_cache: Self::initialize_pricing_cache(),
};
info!("Cohere provider '{}' initialized successfully", config.name);
Ok(provider)
}
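
    /// Static pricing snapshot (USD per 1K tokens), stamped at construction
    /// time; figures are baked in and may drift from Cohere's published
    /// pricing.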
fn initialize_pricing_cache() -> HashMap<String, ModelPricing> {
let mut cache = HashMap::new();
cache.insert(
"command".to_string(),
ModelPricing {
model: "command".to_string(),
input_cost_per_1k: 0.0015,
output_cost_per_1k: 0.002,
currency: "USD".to_string(),
updated_at: chrono::Utc::now(),
},
);
cache.insert(
"command-light".to_string(),
ModelPricing {
model: "command-light".to_string(),
input_cost_per_1k: 0.0003,
output_cost_per_1k: 0.0006,
currency: "USD".to_string(),
updated_at: chrono::Utc::now(),
},
);
cache.insert(
"embed-english-v3.0".to_string(),
ModelPricing {
model: "embed-english-v3.0".to_string(),
input_cost_per_1k: 0.0001,
output_cost_per_1k: 0.0,
currency: "USD".to_string(),
updated_at: chrono::Utc::now(),
},
);
cache
}
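
    /// Splits OpenAI-style messages into an optional system preamble plus a
    /// "User:"/"Assistant:" transcript; tool and function messages fall
    /// through the catch-all arm and are dropped.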
fn convert_messages_to_cohere(&self, messages: &[ChatMessage]) -> (Option<String>, String) {
let mut system_message = None;
let mut conversation_parts = Vec::new();
for message in messages {
match message.role {
                MessageRole::System => {
                    // Flatten multi-part system content too, instead of
                    // silently dropping anything that isn't plain text.
                    if let Some(content) = &message.content {
                        system_message = Some(self.extract_text_content(content));
                    }
                }
MessageRole::User => {
if let Some(content) = &message.content {
conversation_parts
.push(format!("User: {}", self.extract_text_content(content)));
}
}
MessageRole::Assistant => {
if let Some(content) = &message.content {
conversation_parts
.push(format!("Assistant: {}", self.extract_text_content(content)));
}
}
_ => {}
}
}
let conversation = conversation_parts.join("\n");
(system_message, conversation)
}
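
    /// Flattens message content to plain text; non-text parts (e.g. images)
    /// are dropped, consistent with `supports_images` returning false.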
fn extract_text_content(&self, content: &MessageContent) -> String {
match content {
MessageContent::Text(text) => text.clone(),
MessageContent::Parts(parts) => parts
.iter()
.filter_map(|part| match part {
ContentPart::Text { text } => Some(text.clone()),
_ => None,
})
.collect::<Vec<String>>()
.join("\n"),
}
}
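
    /// Maps a Cohere `/generate` response onto the OpenAI chat-completion
    /// shape, taking the first generation's text as the assistant reply.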
fn convert_cohere_response_to_openai(
&self,
cohere_response: serde_json::Value,
model: &str,
) -> Result<ChatCompletionResponse> {
        // Cohere's /generate carries its output under generations[0].text;
        // treat a missing field as empty output rather than erroring.
        let content = cohere_response
            .get("generations")
            .and_then(|g| g.as_array())
            .and_then(|arr| arr.first())
            .and_then(|generation| generation.get("text"))
            .and_then(|text| text.as_str())
            .unwrap_or("")
            .to_string();
        let prompt_tokens = Self::billed_tokens(&cohere_response, "input_tokens");
        let completion_tokens = Self::billed_tokens(&cohere_response, "output_tokens");
        let usage = Usage {
            prompt_tokens,
            completion_tokens,
            total_tokens: prompt_tokens + completion_tokens,
            prompt_tokens_details: None,
            completion_tokens_details: None,
        };
Ok(ChatCompletionResponse {
id: format!("chatcmpl-cohere-{}", uuid::Uuid::new_v4()),
object: "chat.completion".to_string(),
created: chrono::Utc::now().timestamp() as u64,
model: model.to_string(),
choices: vec![ChatChoice {
index: 0,
message: ChatMessage {
role: MessageRole::Assistant,
content: Some(MessageContent::Text(content)),
name: None,
function_call: None,
tool_calls: None,
tool_call_id: None,
audio: None,
},
finish_reason: Some("stop".to_string()),
logprobs: None,
}],
usage: Some(usage),
system_fingerprint: None,
})
}
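
    /// Bearer-auth headers attached to every request; the `parse().unwrap()`
    /// calls assume the API key contains only valid header characters and
    /// will panic otherwise.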
fn create_headers(&self) -> reqwest::header::HeaderMap {
let mut headers = reqwest::header::HeaderMap::new();
headers.insert(
reqwest::header::AUTHORIZATION,
format!("Bearer {}", self.base.api_key).parse().unwrap(),
);
headers.insert(
reqwest::header::CONTENT_TYPE,
"application/json".parse().unwrap(),
);
headers
}
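
    /// Billed token counts from the `meta.billed_units` block that Cohere v1
    /// responses typically carry; a missing field counts as zero.
    fn billed_tokens(response: &serde_json::Value, field: &str) -> u32 {
        response
            .get("meta")
            .and_then(|m| m.get("billed_units"))
            .and_then(|b| b.get(field))
            .and_then(|v| v.as_u64())
            .unwrap_or(0) as u32
    }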
}
#[async_trait]
impl Provider for CohereProvider {
fn name(&self) -> &str {
&self.base.name
}
fn provider_type(&self) -> ProviderType {
ProviderType::Cohere
}
async fn supports_model(&self, model: &str) -> bool {
self.base.is_model_supported(model)
|| model.starts_with("command")
|| model.starts_with("embed-")
}
    async fn supports_images(&self) -> bool {
        false
    }
async fn supports_embeddings(&self) -> bool {
true
}
async fn supports_streaming(&self) -> bool {
true
}
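
    /// Static catalog of well-known models; Cohere is not queried for a live
    /// list, so newer models may be absent here even though `supports_model`
    /// accepts them by prefix.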
async fn list_models(&self) -> Result<Vec<Model>> {
let known_models = vec![
"command",
"command-light",
"command-nightly",
"embed-english-v3.0",
"embed-multilingual-v3.0",
];
let models = known_models
.into_iter()
.map(|model| Model {
id: model.to_string(),
object: "model".to_string(),
created: chrono::Utc::now().timestamp() as u64,
owned_by: "cohere".to_string(),
})
.collect();
Ok(models)
}
async fn health_check(&self) -> Result<()> {
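        // Stub: reports healthy without calling the API; a stricter check
        // could issue a lightweight authenticated request instead.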
debug!("Performing Cohere health check");
Ok(())
}
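
    /// Chat completions routed through Cohere's v1 `/generate` endpoint,
    /// which accepts a single raw prompt rather than structured chat turns.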
async fn chat_completion(
&self,
request: ChatCompletionRequest,
_context: RequestContext,
) -> Result<ChatCompletionResponse> {
debug!("Cohere chat completion for model: {}", request.model);
        // v1 /generate takes one raw prompt (the /chat endpoint is the one
        // with preamble/chat_history fields), so fold the system message and
        // the full transcript into the prompt instead of keeping only the
        // last user turn.
        let (system_message, conversation) = self.convert_messages_to_cohere(&request.messages);
        let prompt = match system_message {
            Some(system) => format!("{}\n\n{}", system, conversation),
            None => conversation,
        };
        let mut body = json!({
            "model": request.model,
            "prompt": prompt,
            "max_tokens": request.max_tokens.unwrap_or(4096),
        });
if let Some(temperature) = request.temperature {
body["temperature"] = json!(temperature);
}
if let Some(top_p) = request.top_p {
body["p"] = json!(top_p);
}
if let Some(stop) = request.stop {
body["stop_sequences"] = json!(stop);
}
let url = format!("{}/generate", self.base.base_url);
let response = self
.base
.client
.post(&url)
.headers(self.create_headers())
.json(&body)
.send()
.await
.map_err(|e| ProviderError::Network(e.to_string()))?;
if !response.status().is_success() {
let status = response.status();
let error_text = response.text().await.unwrap_or_default();
return Err(match status.as_u16() {
401 => ProviderError::Authentication(error_text),
429 => ProviderError::RateLimit(error_text),
400 => ProviderError::InvalidRequest(error_text),
_ => ProviderError::Unknown(format!("HTTP {}: {}", status, error_text)),
}
.into());
}
let cohere_response: serde_json::Value = self.base.parse_json_response(response).await?;
self.convert_cohere_response_to_openai(cohere_response, &request.model)
}
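
    /// Text completions map nearly one-to-one onto Cohere's `/generate`
    /// request shape, so the body is assembled directly from the request.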
async fn completion(
&self,
request: CompletionRequest,
_context: RequestContext,
) -> Result<CompletionResponse> {
debug!("Cohere completion for model: {}", request.model);
let body = json!({
"model": request.model,
"prompt": request.prompt,
"max_tokens": request.max_tokens,
"temperature": request.temperature,
"p": request.top_p,
"stop_sequences": request.stop
});
let url = format!("{}/generate", self.base.base_url);
let response = self
.base
.client
.post(&url)
.headers(self.create_headers())
.json(&body)
.send()
.await
.map_err(|e| ProviderError::Network(e.to_string()))?;
let cohere_response: serde_json::Value = self.base.parse_json_response(response).await?;
let text = cohere_response
.get("generations")
.and_then(|g| g.as_array())
.and_then(|arr| arr.first())
.and_then(|generation| generation.get("text"))
.and_then(|text| text.as_str())
.unwrap_or("")
.to_string();
        let prompt_tokens = Self::billed_tokens(&cohere_response, "input_tokens");
        let completion_tokens = Self::billed_tokens(&cohere_response, "output_tokens");
        Ok(CompletionResponse {
            id: format!("cmpl-cohere-{}", uuid::Uuid::new_v4()),
            object: "text_completion".to_string(),
            created: chrono::Utc::now().timestamp() as u64,
            model: request.model,
            choices: vec![CompletionChoice {
                text,
                index: 0,
                logprobs: None,
                finish_reason: Some("stop".to_string()),
            }],
            usage: Some(Usage {
                prompt_tokens,
                completion_tokens,
                total_tokens: prompt_tokens + completion_tokens,
                prompt_tokens_details: None,
                completion_tokens_details: None,
            }),
})
}
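
    /// Embeddings via Cohere's v1 `/embed` endpoint. This assumes
    /// `request.input` serializes to a JSON array of strings, which is the
    /// shape Cohere's `texts` field expects.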
async fn embedding(
&self,
request: EmbeddingRequest,
_context: RequestContext,
) -> Result<EmbeddingResponse> {
debug!("Cohere embedding for model: {}", request.model);
let body = json!({
"model": request.model,
"texts": request.input
});
let url = format!("{}/embed", self.base.base_url);
let response = self
.base
.client
.post(&url)
.headers(self.create_headers())
.json(&body)
.send()
.await
.map_err(|e| ProviderError::Network(e.to_string()))?;
let cohere_response: serde_json::Value = self.base.parse_json_response(response).await?;
        // Cohere returns `embeddings` as an array of float arrays; map each
        // onto an OpenAI-style EmbeddingObject, indexed in request order.
        let embeddings = cohere_response
            .get("embeddings")
            .and_then(|e| e.as_array())
            .map(|v| v.as_slice())
            .unwrap_or(&[])
            .iter()
            .enumerate()
            .map(|(index, embedding)| {
                let embedding_vec = embedding
                    .as_array()
                    .map(|v| v.as_slice())
                    .unwrap_or(&[])
                    .iter()
                    .filter_map(|v| v.as_f64())
                    .collect();
                EmbeddingObject {
                    object: "embedding".to_string(),
                    embedding: embedding_vec,
                    index: index as u32,
                }
            })
            .collect();
Ok(EmbeddingResponse {
object: "list".to_string(),
data: embeddings,
model: request.model,
            usage: EmbeddingUsage {
                // Cohere bills embeddings by input tokens only.
                prompt_tokens: Self::billed_tokens(&cohere_response, "input_tokens"),
                total_tokens: Self::billed_tokens(&cohere_response, "input_tokens"),
            },
})
}
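
    /// Cohere exposes no image-generation API, so this is rejected outright.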
async fn image_generation(
&self,
_request: ImageGenerationRequest,
_context: RequestContext,
) -> Result<ImageGenerationResponse> {
Err(
ProviderError::InvalidRequest("Cohere does not support image generation".to_string())
.into(),
)
}
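
    /// Cached pricing lookup; models missing from the cache fall back to
    /// command-tier rates rather than erroring.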
async fn get_model_pricing(&self, model: &str) -> Result<ModelPricing> {
if let Some(pricing) = self.pricing_cache.get(model) {
Ok(pricing.clone())
} else {
Ok(ModelPricing {
model: model.to_string(),
                input_cost_per_1k: 0.0015,
                output_cost_per_1k: 0.002,
currency: "USD".to_string(),
updated_at: chrono::Utc::now(),
})
}
}
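
    /// Cost in USD as (tokens / 1000) * per-1K rate, summed over input and
    /// output: e.g. 1500 input + 500 output tokens on `command` (0.0015 /
    /// 0.002 per 1K) costs 1.5 * 0.0015 + 0.5 * 0.002 = 0.00325.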
async fn calculate_cost(
&self,
model: &str,
input_tokens: u32,
output_tokens: u32,
) -> Result<f64> {
let pricing = self.get_model_pricing(model).await?;
let input_cost = (input_tokens as f64 / 1000.0) * pricing.input_cost_per_1k;
let output_cost = (output_tokens as f64 / 1000.0) * pricing.output_cost_per_1k;
Ok(input_cost + output_cost)
}
}