use super::{BaseProvider, ModelPricing, Provider, ProviderError, ProviderType};
use crate::config::ProviderConfig;
use crate::core::models::{RequestContext, openai::*};
use crate::utils::error::Result;
use async_trait::async_trait;
use serde_json::json;
use std::collections::HashMap;
use tracing::{debug, info};
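
/// Provider for Writer's Palmyra models. The Writer API is treated as
/// OpenAI-compatible here: requests go to `/v1/chat/completions` and
/// responses are parsed with the shared OpenAI types.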
#[derive(Debug, Clone)]
pub struct WriterProvider {
base: BaseProvider,
pricing_cache: HashMap<String, ModelPricing>,
}

impl WriterProvider {
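    /// Builds a Writer provider from the shared config, defaulting the base
    /// URL to Writer's public endpoint when none is configured.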
pub async fn new(config: &ProviderConfig) -> Result<Self> {
let base = BaseProvider::new(config)?;
let base_url = config
.base_url
.clone()
.unwrap_or_else(|| "https://api.writer.com".to_string());
let provider = Self {
base: BaseProvider { base_url, ..base },
pricing_cache: Self::initialize_pricing_cache(),
};
info!("Writer provider '{}' initialized successfully", config.name);
Ok(provider)
}
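
    /// Builds the static pricing table for known Palmyra models. Rates are
    /// hardcoded snapshots and are not fetched from Writer at runtime.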
    fn initialize_pricing_cache() -> HashMap<String, ModelPricing> {
        // Per-1K-token rates; each Palmyra model currently uses the same
        // rate for input and output tokens.
        let rates = [
            ("palmyra-x-004", 0.00025),
            ("palmyra-x-003", 0.0002),
            ("palmyra-x-002-instruct", 0.00015),
        ];
        let mut cache = HashMap::new();
        for (model, rate) in rates {
            cache.insert(
                model.to_string(),
                ModelPricing {
                    model: model.to_string(),
                    input_cost_per_1k: rate,
                    output_cost_per_1k: rate,
                    currency: "USD".to_string(),
                    updated_at: chrono::Utc::now(),
                },
            );
        }
        cache
    }
}

#[async_trait]
impl Provider for WriterProvider {
    fn name(&self) -> &str {
        &self.base.name
    }

    fn provider_type(&self) -> ProviderType {
        ProviderType::Custom("writer".to_string())
    }

    async fn supports_model(&self, model: &str) -> bool {
        // Accept explicitly configured models plus anything in the Palmyra
        // family.
        self.base.is_model_supported(model) || model.starts_with("palmyra")
    }

    async fn supports_images(&self) -> bool {
        false
    }

    async fn supports_embeddings(&self) -> bool {
        false
    }

    async fn supports_streaming(&self) -> bool {
        true
    }
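
    /// Returns a static list of known Palmyra models; the Writer model
    /// catalog is not queried dynamically here.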
async fn list_models(&self) -> Result<Vec<Model>> {
let known_models = vec!["palmyra-x-004", "palmyra-x-003", "palmyra-x-002-instruct"];
let models = known_models
.into_iter()
.map(|model| Model {
id: model.to_string(),
object: "model".to_string(),
created: chrono::Utc::now().timestamp() as u64,
owned_by: "writer".to_string(),
})
.collect();
Ok(models)
}

    async fn health_check(&self) -> Result<()> {
        debug!("Performing Writer health check");
        // No endpoint is actually queried; the check succeeds as long as the
        // provider is configured.
        Ok(())
    }
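
    /// Sends a chat completion request to Writer's `/v1/chat/completions`
    /// endpoint. The response is parsed as a single JSON object, so this path
    /// assumes streaming responses are handled by the caller.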
async fn chat_completion(
&self,
request: ChatCompletionRequest,
_context: RequestContext,
) -> Result<ChatCompletionResponse> {
debug!("Writer chat completion for model: {}", request.model);
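        // Forward only the core sampling parameters; other OpenAI fields are
        // intentionally not proxied to Writer.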
let mut body = json!({
"model": request.model,
"messages": request.messages
});
if let Some(max_tokens) = request.max_tokens {
body["max_tokens"] = json!(max_tokens);
}
if let Some(temperature) = request.temperature {
body["temperature"] = json!(temperature);
}
if let Some(top_p) = request.top_p {
body["top_p"] = json!(top_p);
}
if let Some(stream) = request.stream {
body["stream"] = json!(stream);
}
let url = format!("{}/v1/chat/completions", self.base.base_url);
let response = self
.base
.client
.post(&url)
.header("Authorization", format!("Bearer {}", self.base.api_key))
.header("Content-Type", "application/json")
.json(&body)
.send()
.await
.map_err(|e| ProviderError::Network(e.to_string()))?;
if !response.status().is_success() {
let status = response.status();
let error_text = response.text().await.unwrap_or_default();
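            // Map common HTTP statuses onto typed provider errors so callers
            // can distinguish auth, rate-limit, and bad-request failures.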
return Err(match status.as_u16() {
401 => ProviderError::Authentication(error_text),
429 => ProviderError::RateLimit(error_text),
404 => ProviderError::ModelNotFound(error_text),
400 => ProviderError::InvalidRequest(error_text),
_ => ProviderError::Unknown(format!("HTTP {}: {}", status, error_text)),
}
.into());
}
let response_json: ChatCompletionResponse = self.base.parse_json_response(response).await?;
Ok(response_json)
}
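
    /// Implements the legacy text-completion API by wrapping the prompt in a
    /// single-turn chat request and reshaping the chat response.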
async fn completion(
&self,
request: CompletionRequest,
        context: RequestContext,
) -> Result<CompletionResponse> {
let chat_request = ChatCompletionRequest {
model: request.model.clone(),
messages: vec![ChatMessage {
role: MessageRole::User,
content: Some(MessageContent::Text(request.prompt)),
name: None,
function_call: None,
tool_calls: None,
tool_call_id: None,
audio: None,
}],
max_tokens: request.max_tokens,
temperature: request.temperature.map(|t| t as f32),
top_p: request.top_p.map(|t| t as f32),
n: request.n,
stream: request.stream,
stop: request.stop,
presence_penalty: request.presence_penalty.map(|p| p as f32),
frequency_penalty: request.frequency_penalty.map(|p| p as f32),
logit_bias: request
.logit_bias
.map(|bias| bias.into_iter().map(|(k, v)| (k, v as f32)).collect()),
user: request.user,
max_completion_tokens: None,
stream_options: None,
functions: None,
function_call: None,
tools: None,
tool_choice: None,
response_format: None,
seed: None,
logprobs: None,
top_logprobs: None,
modalities: None,
audio: None,
};
        let chat_response = self.chat_completion(chat_request, context).await?;
        // Guard against an empty choices array instead of unwrapping, which
        // would panic on a malformed upstream response.
        let first_choice = chat_response
            .choices
            .first()
            .ok_or_else(|| ProviderError::Unknown("Writer returned no choices".to_string()))?;
        let text = match &first_choice.message.content {
            Some(MessageContent::Text(text)) => text.clone(),
            Some(MessageContent::Parts(parts)) => parts
                .iter()
                .filter_map(|part| match part {
                    ContentPart::Text { text } => Some(text.clone()),
                    _ => None,
                })
                .collect::<Vec<String>>()
                .join(" "),
            None => String::new(),
        };
Ok(CompletionResponse {
id: chat_response.id.replace("chatcmpl", "cmpl"),
object: "text_completion".to_string(),
created: chat_response.created,
model: request.model,
choices: vec![CompletionChoice {
text,
index: 0,
logprobs: None,
                finish_reason: first_choice.finish_reason.clone(),
}],
usage: chat_response.usage,
})
}

    async fn embedding(
&self,
_request: EmbeddingRequest,
_context: RequestContext,
) -> Result<EmbeddingResponse> {
Err(ProviderError::InvalidRequest("Embeddings not supported by Writer".to_string()).into())
}

    async fn image_generation(
&self,
_request: ImageGenerationRequest,
_context: RequestContext,
) -> Result<ImageGenerationResponse> {
Err(
ProviderError::InvalidRequest("Image generation not supported by Writer".to_string())
.into(),
)
}
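
    /// Looks up cached pricing; unknown models fall back to a default rate of
    /// 0.0002 USD per 1K tokens instead of returning an error.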
async fn get_model_pricing(&self, model: &str) -> Result<ModelPricing> {
if let Some(pricing) = self.pricing_cache.get(model) {
Ok(pricing.clone())
} else {
Ok(ModelPricing {
model: model.to_string(),
input_cost_per_1k: 0.0002,
output_cost_per_1k: 0.0002,
currency: "USD".to_string(),
updated_at: chrono::Utc::now(),
})
}
}
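
    /// Computes request cost as
    /// `input_tokens / 1000 * input_rate + output_tokens / 1000 * output_rate`.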
async fn calculate_cost(
&self,
model: &str,
input_tokens: u32,
output_tokens: u32,
) -> Result<f64> {
let pricing = self.get_model_pricing(model).await?;
let input_cost = (input_tokens as f64 / 1000.0) * pricing.input_cost_per_1k;
let output_cost = (output_tokens as f64 / 1000.0) * pricing.output_cost_per_1k;
Ok(input_cost + output_cost)
}
}
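
#[cfg(test)]
mod tests {
    // A minimal sanity check of the cost arithmetic used by `calculate_cost`,
    // assuming the cached palmyra-x-004 rate above (0.00025 USD per 1K tokens
    // for both input and output). Pure math; no provider construction needed.
    #[test]
    fn palmyra_x_004_cost_arithmetic() {
        let (input_tokens, output_tokens) = (2_000u32, 1_000u32);
        let rate_per_1k = 0.00025_f64;

        let cost = (input_tokens as f64 / 1000.0) * rate_per_1k
            + (output_tokens as f64 / 1000.0) * rate_per_1k;

        // 2.0 * 0.00025 + 1.0 * 0.00025 = 0.00075 USD
        assert!((cost - 0.00075).abs() < 1e-12);
    }
}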