use anyhow::Result;
use async_openai::{
types::{
ChatCompletionRequestMessage, ChatCompletionRequestSystemMessageArgs,
ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
},
Client,
};
use serde::{Deserialize, Serialize};
use tracing::debug;
use std::sync::Arc;
use crate::config::AiConfig;
/// Tunable parameters for a summarization request (see `summarize_content`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryOptions {
    /// Upper bound on summary length, in words; interpolated into the prompt.
    pub max_length: usize,
    /// Presentation style the model is instructed to use.
    pub style: SummaryStyle,
    /// When true, the prompt additionally asks the model to extract keywords.
    pub include_keywords: bool,
}
/// Presentation style requested from the model; each variant maps to a
/// distinct instruction sentence in `summarize_content`'s system prompt.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SummaryStyle {
    /// "Provide a brief, concise summary".
    Brief,
    /// "Provide a detailed, comprehensive summary".
    Detailed,
    /// "Provide a summary in bullet point format".
    Bullet,
    /// "Provide a technical summary focusing on key concepts".
    Technical,
}
/// Result of a summarization request, deserialized from the JSON object the
/// model is prompted to emit in `summarize_content`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Summary {
    /// The summary text itself.
    pub text: String,
    /// Keywords reported by the model (expected empty when keyword
    /// extraction was not requested — model-dependent, not enforced here).
    pub keywords: Vec<String>,
    /// Word count as reported by the model; not recomputed locally.
    pub word_count: usize,
    /// Model-reported confidence — presumably in [0, 1] per the prompt's
    /// example value, but the model's output is not validated here.
    pub confidence: f32,
}
impl Default for SummaryOptions {
    /// Defaults to a brief summary capped at 200 words, with keyword
    /// extraction enabled.
    fn default() -> Self {
        Self {
            include_keywords: true,
            style: SummaryStyle::Brief,
            max_length: 200,
        }
    }
}
/// Summarize `content` with the given chat model, honoring `options` for
/// style, target length, and keyword extraction.
///
/// The model is instructed to reply with a JSON object matching [`Summary`],
/// which is deserialized and returned.
///
/// # Errors
/// Returns an error if the request cannot be built, the API call fails, the
/// response carries no choice/content, or the reply is not valid JSON.
pub async fn summarize_content(
    client: &Client<async_openai::config::OpenAIConfig>,
    content: &str,
    options: &SummaryOptions,
    model: &str,
) -> Result<Summary> {
    let style_instruction = match options.style {
        SummaryStyle::Brief => "Provide a brief, concise summary",
        SummaryStyle::Detailed => "Provide a detailed, comprehensive summary",
        SummaryStyle::Bullet => "Provide a summary in bullet point format",
        SummaryStyle::Technical => "Provide a technical summary focusing on key concepts",
    };
    let system_prompt = format!(
        r#"You are a content summarization expert. {}.
Keep the summary under {} words.
{}
Respond in JSON format:
{{
"text": "summary text",
"keywords": ["keyword1", "keyword2"],
"word_count": 150,
"confidence": 0.85
}}"#,
        style_instruction,
        options.max_length,
        if options.include_keywords { "Also extract key keywords." } else { "" }
    );
    // Budget enough completion tokens for a `max_length`-word summary plus the
    // JSON envelope (~2 tokens/word is a generous upper bound, +100 for the
    // structure). The previous hard-coded 50 tokens truncated the JSON reply,
    // which then failed to deserialize.
    let max_tokens =
        u32::try_from(options.max_length.saturating_mul(2).saturating_add(100))
            .unwrap_or(u32::MAX);
    let request = CreateChatCompletionRequestArgs::default()
        .model(model)
        .messages([
            ChatCompletionRequestMessage::System(
                ChatCompletionRequestSystemMessageArgs::default()
                    .content(system_prompt)
                    .build()?,
            ),
            ChatCompletionRequestMessage::User(
                ChatCompletionRequestUserMessageArgs::default()
                    .content(content)
                    .build()?,
            ),
        ])
        .temperature(0.3)
        .max_tokens(max_tokens)
        .build()?;
    let response = client.chat().create(request).await?;
    // `first()` instead of `[0]`: an empty `choices` array must surface as an
    // error, not a panic.
    let response_content = response
        .choices
        .first()
        .and_then(|choice| choice.message.content.as_ref())
        .ok_or_else(|| anyhow::anyhow!("No response from AI"))?;
    let summary: Summary = serde_json::from_str(response_content)?;
    Ok(summary)
}
/// Ask the model for the `max_phrases` most important key phrases in
/// `content`, expecting a JSON array of strings back.
///
/// # Errors
/// Returns an error if the request cannot be built, the API call fails, the
/// response carries no choice/content, or the reply is not a JSON string
/// array.
pub async fn extract_key_phrases(
    client: &Client<async_openai::config::OpenAIConfig>,
    content: &str,
    max_phrases: usize,
    model: &str,
) -> Result<Vec<String>> {
    // Scale the completion budget with the number of requested phrases
    // (~15 tokens per phrase incl. JSON punctuation), keeping the previous
    // 200-token floor. A fixed 200 truncated the array for large requests.
    let max_tokens =
        u32::try_from(max_phrases.saturating_mul(15).max(200)).unwrap_or(u32::MAX);
    let request = CreateChatCompletionRequestArgs::default()
        .model(model)
        .messages([
            ChatCompletionRequestMessage::System(
                ChatCompletionRequestSystemMessageArgs::default()
                    .content(format!(
                        "Extract the {} most important key phrases from the following text. Return as a JSON array of strings.",
                        max_phrases
                    ))
                    .build()?,
            ),
            ChatCompletionRequestMessage::User(
                ChatCompletionRequestUserMessageArgs::default()
                    .content(content)
                    .build()?,
            ),
        ])
        .temperature(0.2)
        .max_tokens(max_tokens)
        .build()?;
    let response = client.chat().create(request).await?;
    // `first()` instead of `[0]`: an empty `choices` array must surface as an
    // error, not a panic.
    let response_content = response
        .choices
        .first()
        .and_then(|choice| choice.message.content.as_ref())
        .ok_or_else(|| anyhow::anyhow!("No response from AI"))?;
    let phrases: Vec<String> = serde_json::from_str(response_content)?;
    Ok(phrases)
}
/// High-level summarization service owning an OpenAI chat client and the
/// application's AI configuration.
pub struct SummarizerService {
    // Client constructed in `new`; `async_openai::Client::new()` takes its
    // credentials from the environment, not from `config`.
    client: async_openai::Client<async_openai::config::OpenAIConfig>,
    // Shared settings; `model` and `temperature` are read by the methods.
    config: Arc<AiConfig>,
}
impl SummarizerService {
    /// Build a service around a freshly constructed OpenAI client.
    ///
    /// NOTE(review): `async_openai::Client::new()` reads credentials from the
    /// environment; `config` currently supplies only model/temperature —
    /// confirm whether `AiConfig` is also meant to carry the API key.
    pub fn new(config: Arc<AiConfig>) -> Result<Self> {
        let client = async_openai::Client::new();
        Ok(Self { client, config })
    }

    /// Produce a concise plain-text summary of `text` with the configured
    /// model and temperature.
    ///
    /// # Errors
    /// Returns an error if the request cannot be built or the API call fails.
    /// A response with no content yields the fallback string instead of an
    /// error (preserves existing caller-visible behavior).
    pub async fn summarize_text(&self, text: &str) -> Result<String> {
        debug!("Summarizing text of length: {}", text.len());
        let request = async_openai::types::CreateChatCompletionRequestArgs::default()
            .model(&self.config.model)
            .messages([
                async_openai::types::ChatCompletionRequestMessage::System(
                    async_openai::types::ChatCompletionRequestSystemMessageArgs::default()
                        .content("You are a helpful assistant that creates concise summaries.")
                        .build()?,
                ),
                async_openai::types::ChatCompletionRequestMessage::User(
                    async_openai::types::ChatCompletionRequestUserMessageArgs::default()
                        .content(format!("Please provide a concise summary of the following text:\n\n{}", text))
                        .build()?,
                ),
            ])
            // The previous cap of 50 tokens truncated summaries mid-sentence;
            // 512 leaves room for a genuinely "concise" paragraph.
            .max_tokens(512u32)
            .temperature(self.config.temperature)
            .build()?;
        let response = self.client.chat().create(request).await?;
        // `first()` avoids a panic on an empty `choices` array, and the
        // `unwrap_or_else` form avoids allocating the fallback String on the
        // success path (the old `unwrap_or(&"...".to_string())` always did).
        Ok(response
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| "No summary generated".to_string()))
    }

    /// Generate a short, descriptive title for `text` with the configured
    /// model (fixed low temperature for stable titles).
    ///
    /// # Errors
    /// Returns an error if the request cannot be built or the API call fails.
    /// A response with no content yields "Untitled".
    pub async fn generate_title(&self, text: &str) -> Result<String> {
        debug!("Generating title for text of length: {}", text.len());
        let request = async_openai::types::CreateChatCompletionRequestArgs::default()
            .model(&self.config.model)
            .messages([
                async_openai::types::ChatCompletionRequestMessage::System(
                    async_openai::types::ChatCompletionRequestSystemMessageArgs::default()
                        .content("You are a helpful assistant that creates concise, descriptive titles.")
                        .build()?,
                ),
                async_openai::types::ChatCompletionRequestMessage::User(
                    async_openai::types::ChatCompletionRequestUserMessageArgs::default()
                        .content(format!("Please generate a short, descriptive title for the following text:\n\n{}", text))
                        .build()?,
                ),
            ])
            // 50 tokens is ample for a title, so the original cap stays.
            .max_tokens(50u32)
            .temperature(0.3)
            .build()?;
        let response = self.client.chat().create(request).await?;
        // Same panic-free, allocation-free-on-success extraction as above.
        Ok(response
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| "Untitled".to_string()))
    }
}