//! openserve 2.0.3
//!
//! A modern, high-performance, AI-enhanced file server built in Rust.
//!
//! Content summarization using AI

use anyhow::Result;
use async_openai::{
    types::{
        ChatCompletionRequestMessage, ChatCompletionRequestSystemMessageArgs,
        ChatCompletionRequestUserMessageArgs, CreateChatCompletionRequestArgs,
    },
    Client,
};
use serde::{Deserialize, Serialize};
use tracing::debug;

use std::sync::Arc;

use crate::config::AiConfig;

/// Options for configuring content summarization.
/// Options for configuring content summarization.
///
/// See the `Default` impl in this module for the default values
/// (200 words, `Brief` style, keywords included).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryOptions {
    /// The maximum length of the summary in words (passed to the model
    /// as a prompt instruction, so it is a soft limit, not enforced here).
    pub max_length: usize,
    /// The desired style of the summary.
    pub style: SummaryStyle,
    /// Whether to ask the model to also extract keywords into
    /// `Summary::keywords`.
    pub include_keywords: bool,
}

/// Defines the different styles of summaries that can be generated.
/// Defines the different styles of summaries that can be generated.
///
/// Each variant maps to a distinct instruction in the system prompt
/// built by `summarize_content`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SummaryStyle {
    /// A short, concise summary.
    Brief,
    /// A detailed, comprehensive summary.
    Detailed,
    /// A summary formatted as a list of bullet points.
    Bullet,
    /// A summary that focuses on technical aspects and concepts.
    Technical,
}

/// The result of a content summarization operation.
/// The result of a content summarization operation.
///
/// Deserialized directly from the JSON object the model is instructed
/// to return, so every field is model-reported and not validated here.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Summary {
    /// The generated summary text.
    pub text: String,
    /// A list of keywords extracted from the content (may be empty when
    /// keyword extraction was not requested).
    pub keywords: Vec<String>,
    /// The word count of the generated summary, as reported by the model.
    pub word_count: usize,
    /// The confidence score of the summary generation — the prompt
    /// suggests a 0.0–1.0 scale, but the value is taken from the model
    /// verbatim and is not range-checked.
    pub confidence: f32,
}

impl Default for SummaryOptions {
    fn default() -> Self {
        Self {
            max_length: 200,
            style: SummaryStyle::Brief,
            include_keywords: true,
        }
    }
}

/// Summarize content using AI
/// Summarize content using AI.
///
/// Builds a chat-completion request whose system prompt encodes the
/// requested `options` (style, word limit, keyword extraction) and asks
/// the model to respond with a JSON object matching [`Summary`].
///
/// # Errors
///
/// Returns an error if the request cannot be built or sent, if the API
/// response contains no choices or no message content, or if the
/// response body is not valid [`Summary`] JSON.
pub async fn summarize_content(
    client: &Client<async_openai::config::OpenAIConfig>,
    content: &str,
    options: &SummaryOptions,
    model: &str,
) -> Result<Summary> {
    let style_instruction = match options.style {
        SummaryStyle::Brief => "Provide a brief, concise summary",
        SummaryStyle::Detailed => "Provide a detailed, comprehensive summary",
        SummaryStyle::Bullet => "Provide a summary in bullet point format",
        SummaryStyle::Technical => "Provide a technical summary focusing on key concepts",
    };

    let system_prompt = format!(
        r#"You are a content summarization expert. {}. 
        Keep the summary under {} words.
        {}
        
        Respond in JSON format:
        {{
          "text": "summary text",
          "keywords": ["keyword1", "keyword2"],
          "word_count": 150,
          "confidence": 0.85
        }}"#,
        style_instruction,
        options.max_length,
        if options.include_keywords { "Also extract key keywords." } else { "" }
    );

    // Budget enough completion tokens for the requested summary length plus
    // the JSON envelope (~2 tokens per word is a conservative upper bound).
    // A hard-coded small cap (previously 50) truncates the JSON mid-object
    // and makes the `serde_json::from_str` below fail for any real summary.
    let max_tokens = u32::try_from(options.max_length)
        .unwrap_or(u32::MAX)
        .saturating_mul(2)
        .saturating_add(100);

    let request = CreateChatCompletionRequestArgs::default()
        .model(model)
        .messages([
            ChatCompletionRequestMessage::System(
                ChatCompletionRequestSystemMessageArgs::default()
                    .content(system_prompt)
                    .build()?
            ),
            ChatCompletionRequestMessage::User(
                ChatCompletionRequestUserMessageArgs::default()
                    .content(content)
                    .build()?
            ),
        ])
        .temperature(0.3)
        .max_tokens(max_tokens)
        .build()?;

    let response = client.chat().create(request).await?;

    // `.first()` instead of `[0]`: an empty `choices` array must surface as
    // an error, not a panic.
    let response_content = response
        .choices
        .first()
        .and_then(|choice| choice.message.content.as_ref())
        .ok_or_else(|| anyhow::anyhow!("No response from AI"))?;

    let summary: Summary = serde_json::from_str(response_content)?;
    Ok(summary)
}

/// Extract key phrases from content
/// Extract key phrases from content.
///
/// Asks the model for the `max_phrases` most important key phrases in
/// `content`, expecting a JSON array of strings in the reply.
///
/// # Errors
///
/// Returns an error if the request cannot be built or sent, if the API
/// response contains no choices or no message content, or if the
/// response body is not a valid JSON array of strings.
pub async fn extract_key_phrases(
    client: &Client<async_openai::config::OpenAIConfig>,
    content: &str,
    max_phrases: usize,
    model: &str,
) -> Result<Vec<String>> {
    let request = CreateChatCompletionRequestArgs::default()
        .model(model)
        .messages([
            ChatCompletionRequestMessage::System(
                ChatCompletionRequestSystemMessageArgs::default()
                    .content(format!(
                        "Extract the {} most important key phrases from the following text. Return as a JSON array of strings.",
                        max_phrases
                    ))
                    .build()?
            ),
            ChatCompletionRequestMessage::User(
                ChatCompletionRequestUserMessageArgs::default()
                    .content(content)
                    .build()?
            ),
        ])
        .temperature(0.2)
        .max_tokens(200u32)
        .build()?;

    let response = client.chat().create(request).await?;

    // `.first()` instead of `[0]`: an empty `choices` array must surface as
    // an error, not a panic.
    let response_content = response
        .choices
        .first()
        .and_then(|choice| choice.message.content.as_ref())
        .ok_or_else(|| anyhow::anyhow!("No response from AI"))?;

    let phrases: Vec<String> = serde_json::from_str(response_content)?;
    Ok(phrases)
}

/// A service for performing content summarization tasks.
/// A service for performing content summarization tasks.
pub struct SummarizerService {
    // OpenAI chat-completions client used for all requests.
    client: async_openai::Client<async_openai::config::OpenAIConfig>,
    // Shared AI settings; this module reads `model` and `temperature`
    // from it.
    config: Arc<AiConfig>,
}

impl SummarizerService {
    /// Creates a new `SummarizerService` instance.
    ///
    /// NOTE(review): the client is built from environment defaults via
    /// `async_openai::Client::new()`; `config` only supplies the model
    /// name and temperature here — confirm whether API key / base URL
    /// from `AiConfig` should also be wired into the client.
    pub fn new(config: Arc<AiConfig>) -> Result<Self> {
        let client = async_openai::Client::new();
        Ok(Self { client, config })
    }

    /// Summarize text using AI.
    ///
    /// Sends `text` to the configured model with a fixed system prompt
    /// and returns the model's reply, or the fallback string
    /// "No summary generated" when the response carries no content.
    ///
    /// # Errors
    ///
    /// Returns an error if the request cannot be built or the API call
    /// fails.
    pub async fn summarize_text(&self, text: &str) -> Result<String> {
        debug!("Summarizing text of length: {}", text.len());

        let request = async_openai::types::CreateChatCompletionRequestArgs::default()
            .model(&self.config.model)
            .messages([
                async_openai::types::ChatCompletionRequestMessage::System(
                    async_openai::types::ChatCompletionRequestSystemMessageArgs::default()
                        .content("You are a helpful assistant that creates concise summaries.")
                        .build()?
                ),
                async_openai::types::ChatCompletionRequestMessage::User(
                    async_openai::types::ChatCompletionRequestUserMessageArgs::default()
                        .content(format!("Please provide a concise summary of the following text:\n\n{}", text))
                        .build()?
                )
            ])
            // 50 tokens (the previous cap) cuts a summary off mid-sentence;
            // allow enough room for a concise paragraph.
            .max_tokens(512u32)
            .temperature(self.config.temperature)
            .build()?;

        let response = self.client.chat().create(request).await?;

        // `.first()` avoids a panic on an empty `choices` array, and the
        // `_else` form defers the fallback allocation to the error path.
        Ok(response
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| "No summary generated".to_string()))
    }

    /// Generate a title for text content.
    ///
    /// Sends `text` to the configured model asking for a short,
    /// descriptive title and returns the model's reply, or "Untitled"
    /// when the response carries no content.
    ///
    /// # Errors
    ///
    /// Returns an error if the request cannot be built or the API call
    /// fails.
    pub async fn generate_title(&self, text: &str) -> Result<String> {
        debug!("Generating title for text of length: {}", text.len());

        let request = async_openai::types::CreateChatCompletionRequestArgs::default()
            .model(&self.config.model)
            .messages([
                async_openai::types::ChatCompletionRequestMessage::System(
                    async_openai::types::ChatCompletionRequestSystemMessageArgs::default()
                        .content("You are a helpful assistant that creates concise, descriptive titles.")
                        .build()?
                ),
                async_openai::types::ChatCompletionRequestMessage::User(
                    async_openai::types::ChatCompletionRequestUserMessageArgs::default()
                        .content(format!("Please generate a short, descriptive title for the following text:\n\n{}", text))
                        .build()?
                )
            ])
            // Titles are short; 50 tokens is a reasonable cap here.
            .max_tokens(50u32)
            .temperature(0.3)
            .build()?;

        let response = self.client.chat().create(request).await?;

        // Same non-panicking, lazily-allocating fallback as summarize_text.
        Ok(response
            .choices
            .first()
            .and_then(|choice| choice.message.content.clone())
            .unwrap_or_else(|| "Untitled".to_string()))
    }
}