use crate::{Result, llm::LLMClient};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::{debug, info};
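/// Generates the L0 Abstract: a compact summary (kept under ~100 tokens per the
/// system prompt below) that captures multiple key aspects of a document for
/// quick relevance checking before deeper layers are consulted.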
pub struct AbstractGenerator;
impl AbstractGenerator {
pub fn new() -> Self {
Self
}
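/// Produces the abstract via the LLM, building the prompt with
/// `Prompts::abstract_generation_with_entities` so that the supplied
/// `known_entities` are included verbatim in the result.
///
/// # Example (illustrative sketch only; `client`, `document_text`, and
/// `entities` are placeholders for values constructed elsewhere)
/// ```ignore
/// let summary = AbstractGenerator::new()
///     .generate_with_llm(&document_text, &client, &entities)
///     .await?;
/// ```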
pub async fn generate_with_llm(
&self,
content: &str,
llm: &Arc<dyn LLMClient>,
known_entities: &[String],
) -> Result<String> {
let char_count = content.chars().count();
info!(
"Generating L0 Abstract (content: {} chars, entities: {:?})",
char_count, known_entities
);
let system = r#"You are an expert at creating concise abstracts.
Your goal is to generate summaries that capture multiple key aspects of content for quick relevance checking.
Keep abstracts under 100 tokens. Prioritize breadth over depth - cover more topics briefly rather than elaborating on one.
Be direct and informative. Use compact phrasing to maximize information density.
When asked to preserve specific named entities, include them verbatim in the abstract."#;
let prompt = crate::llm::prompts::Prompts::abstract_generation_with_entities(
content,
known_entities,
);
debug!("L0 Abstract prompt length: {} chars", prompt.chars().count());
let result = llm.complete_with_system(system, &prompt).await?;
info!("L0 Abstract generated ({} chars)", result.chars().count());
Ok(result)
}
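/// Rough token estimate: assumes roughly 3 bytes of text per token.
/// This is a budgeting heuristic based on byte length, not an exact
/// tokenizer count, and it will overestimate for multi-byte UTF-8 text.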
pub fn estimate_tokens(text: &str) -> usize {
text.len() / 3
}
}
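/// Generates the L1 Overview: a structured markdown summary (500-2000 tokens
/// per the system prompt below) with Summary, Core Topics, Key Points,
/// Entities, and Context sections.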
pub struct OverviewGenerator;
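/// Structured form of an overview for callers that want parsed fields.
/// Note that `OverviewGenerator::generate_with_llm` below currently returns
/// the raw markdown string rather than this struct.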
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Overview {
pub core_topics: Vec<String>,
pub key_points: Vec<String>,
pub entities: Vec<String>,
pub summary: String,
}
impl OverviewGenerator {
pub fn new() -> Self {
Self
}
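/// Produces the overview via the LLM, truncating `content` to its first
/// 16,000 characters so the prompt stays within a bounded size.
///
/// # Example (illustrative sketch only; `client` and `document_text` are
/// placeholders for values constructed elsewhere)
/// ```ignore
/// let overview_md = OverviewGenerator::new()
///     .generate_with_llm(&document_text, &client)
///     .await?;
/// ```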
pub async fn generate_with_llm(
&self,
content: &str,
llm: &Arc<dyn LLMClient>,
) -> Result<String> {
let safe_content: String = content.chars().take(16000).collect();
info!(
"Generating L1 Overview (content: {} chars, truncated from {})",
safe_content.chars().count(),
content.chars().count()
);
let system = r#"You are an expert at creating structured overviews.
Your goal is to provide comprehensive yet concise summaries (500-2000 tokens) that help users understand and make decisions about content.
Use clear markdown structure with sections for Summary, Core Topics, Key Points, Entities, and Context."#;
let prompt = crate::llm::prompts::Prompts::overview_generation(&safe_content);
debug!("L1 Overview prompt length: {} chars", prompt.chars().count());
let result = llm.complete_with_system(system, &prompt).await?;
info!("L1 Overview generated ({} chars)", result.chars().count());
Ok(result)
}
}