use std::sync::LazyLock;
use scraper::{Html, Selector};
// CSS selectors used throughout extraction, compiled once on first use and
// reused for every document. The `expect`s are safe: each selector string is
// a fixed, valid CSS literal, so parsing can only fail on a programmer error.
static H1_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("h1").expect("static h1 selector"));
static ARTICLE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("article").expect("static article selector"));
static MAIN_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("main").expect("static main selector"));
// Candidate containers for the text-density fallback heuristic.
static DENSITY_CANDIDATE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("div, section").expect("static density selector"));
static TITLE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("title").expect("static title selector"));
static OG_TITLE_SELECTOR: LazyLock<Selector> = LazyLock::new(|| {
    Selector::parse("meta[property='og:title']").expect("static og:title selector")
});
/// The result of a successful article extraction.
#[derive(Debug, Clone)]
pub struct Article {
    /// Article title, taken from an `<h1>`, the `<title>` tag, an
    /// `og:title` meta tag, or the extractor's reported title;
    /// `"Untitled"` when none of those yield text.
    pub title: String,
    /// HTML of the extracted main-content region.
    pub content_html: String,
    /// Up to the first 200 characters of the plain text, trimmed.
    pub excerpt: String,
    /// Plain text of the content with whitespace runs collapsed to
    /// single spaces.
    pub text_content: String,
}
/// Extract the main article from raw HTML.
///
/// Runs two independent strategies — the `readability` crate and a
/// scraper-based heuristic — and keeps whichever recovered more text.
/// Returns `None` when neither strategy finds enough content.
pub fn extract_article(html: &str, url: &str) -> Option<Article> {
    let from_readability = extract_with_readability_crate(html, url);
    let from_scraper = extract_with_scraper(html);
    match (from_readability, from_scraper) {
        // Both strategies succeeded: prefer the longer plain-text result.
        (Some(readability), Some(scraped)) => {
            tracing::debug!(
                "readability: {} chars, scraper: {} chars",
                readability.text_content.len(),
                scraped.text_content.len()
            );
            if scraped.text_content.len() > readability.text_content.len() {
                Some(scraped)
            } else {
                Some(readability)
            }
        }
        // At most one succeeded: take whichever is available (or None).
        (readability, scraped) => readability.or(scraped),
    }
}
/// Run the `readability` crate's extractor over `html`.
///
/// Returns `None` when the URL fails to parse, extraction itself fails, or
/// the recovered plain text is shorter than 100 bytes (too little to be a
/// real article body).
fn extract_with_readability_crate(html: &str, url: &str) -> Option<Article> {
    let product =
        readability::extractor::extract(&mut html.as_bytes(), &url::Url::parse(url).ok()?).ok()?;
    let text_content = strip_html_tags(&product.content);
    // Heuristic floor: reject near-empty extractions.
    if text_content.len() < 100 {
        return None;
    }
    // Excerpt is the first 200 characters of the plain text, trimmed.
    let excerpt = text_content
        .chars()
        .take(200)
        .collect::<String>()
        .trim()
        .to_string();
    // Prefer an <h1> found inside the extracted content over the
    // crate-reported title (which is typically the page <title>).
    let title = extract_h1_from_html(&product.content).unwrap_or(product.title);
    Some(Article {
        title,
        content_html: product.content,
        excerpt,
        text_content,
    })
}
/// Text of the first `<h1>` in an HTML fragment, or `None` when the
/// fragment has no `<h1>` or its text is empty after trimming.
fn extract_h1_from_html(html: &str) -> Option<String> {
    let fragment = Html::parse_fragment(html);
    let heading = fragment.select(&H1_SELECTOR).next()?;
    let joined = heading.text().collect::<Vec<_>>().join(" ");
    let trimmed = joined.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_string())
}
/// Scraper-based extraction: try semantic container tags first, and fall
/// back to the text-density heuristic when they yield nothing.
fn extract_with_scraper(html: &str) -> Option<Article> {
    let document = Html::parse_document(html);
    try_semantic_extraction(&document).or_else(|| find_main_content_by_density(&document))
}
/// Extract from the first semantic container found, checking `<article>`
/// before `<main>`.
///
/// Commits to the first container that matches: if it exists but yields
/// too little text, this returns `None` without trying later selectors.
fn try_semantic_extraction(document: &Html) -> Option<Article> {
    for selector in [&*ARTICLE_SELECTOR, &*MAIN_SELECTOR] {
        if let Some(container) = document.select(selector).next() {
            return extract_from_element(container, document);
        }
    }
    None
}
/// Build an [`Article`] from a single container element, or `None` when
/// the element's normalized text is shorter than 100 bytes.
fn extract_from_element(
    element: scraper::element_ref::ElementRef,
    document: &Html,
) -> Option<Article> {
    let content_html = element.html();
    // Join the element's text nodes, then collapse all whitespace runs
    // down to single spaces.
    let joined = element.text().collect::<Vec<_>>().join(" ");
    let text_content = joined.split_whitespace().collect::<Vec<_>>().join(" ");
    if text_content.len() < 100 {
        return None;
    }
    let excerpt = text_content
        .chars()
        .take(200)
        .collect::<String>()
        .trim()
        .to_string();
    Some(Article {
        title: extract_title(document),
        content_html,
        excerpt,
        text_content,
    })
}
fn find_main_content_by_density(document: &Html) -> Option<Article> {
let mut scored_elements = Vec::new();
for element in document.select(&DENSITY_CANDIDATE_SELECTOR) {
if is_unlikely_candidate(&element) {
continue;
}
let text = element.text().collect::<Vec<_>>().join(" ");
let text_len = text.len();
if text_len < 100 {
continue; }
let html_len = element.html().len();
#[allow(clippy::cast_precision_loss)]
let density = text_len as f64 / html_len.max(1) as f64;
scored_elements.push((density, element, text));
}
scored_elements.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap_or(std::cmp::Ordering::Equal));
if let Some((_, element, text)) = scored_elements.first() {
let title = extract_title(document);
let text_content = text.split_whitespace().collect::<Vec<_>>().join(" ");
let excerpt = text_content
.chars()
.take(200)
.collect::<String>()
.trim()
.to_string();
return Some(Article {
title,
content_html: element.html(),
excerpt,
text_content,
});
}
None
}
/// Heuristic check: does this element's `class`/`id` suggest boilerplate
/// (navigation, sidebars, ads, social widgets, comments, …)?
fn is_unlikely_candidate(element: &scraper::element_ref::ElementRef) -> bool {
    const BOILERPLATE_MARKERS: [&str; 10] = [
        "nav",
        "menu",
        "sidebar",
        "footer",
        "header",
        "advertisement",
        "ad-",
        "social",
        "share",
        "comment",
    ];
    let value = element.value();
    // Lowercased "class id" string, matching substrings case-insensitively.
    let haystack = format!(
        "{} {}",
        value.attr("class").unwrap_or(""),
        value.attr("id").unwrap_or("")
    )
    .to_lowercase();
    BOILERPLATE_MARKERS
        .iter()
        .any(|marker| haystack.contains(marker))
}
/// Best-effort document title: first non-empty of `<h1>` text, `<title>`
/// text, then the `og:title` meta content; `"Untitled"` as a last resort.
fn extract_title(document: &Html) -> String {
    // Element-text candidates, in priority order.
    for selector in [&*H1_SELECTOR, &*TITLE_SELECTOR] {
        if let Some(node) = document.select(selector).next() {
            let candidate = node.text().collect::<Vec<_>>().join(" ").trim().to_string();
            if !candidate.is_empty() {
                return candidate;
            }
        }
    }
    // Attribute candidate: <meta property='og:title' content='…'>.
    if let Some(meta) = document.select(&OG_TITLE_SELECTOR).next()
        && let Some(content) = meta.value().attr("content")
    {
        let candidate = content.trim().to_string();
        if !candidate.is_empty() {
            return candidate;
        }
    }
    "Untitled".to_string()
}
/// Plain text of an HTML fragment, with all whitespace runs collapsed to
/// single spaces.
fn strip_html_tags(html: &str) -> String {
    let fragment = Html::parse_fragment(html);
    fragment
        .root_element()
        .text()
        .flat_map(str::split_whitespace)
        .collect::<Vec<_>>()
        .join(" ")
}
#[cfg(test)]
mod tests {
    use super::*;
    // Plain <div> selector used by the candidate-filter test below.
    static DIV_SELECTOR: LazyLock<Selector> =
        LazyLock::new(|| Selector::parse("div").expect("static div selector"));

    // Semantic markup: the <article> body is extracted; nav/footer text is not.
    #[test]
    fn extracts_article_with_semantic_html() {
        let html = r"
<html>
<head><title>Test Article</title></head>
<body>
<header><nav>Site Navigation</nav></header>
<main>
<article>
<h1>Article Title</h1>
<p>This is the main article content with enough text to be recognized as an article body.</p>
<p>It has multiple paragraphs to ensure proper extraction.</p>
</article>
</main>
<footer>© 2025 Copyright</footer>
</body>
</html>
";
        let article = extract_article(html, "https://example.com/article").unwrap();
        // Either extraction path may win, so either title source is acceptable.
        assert!(
            article.title == "Article Title" || article.title == "Test Article",
            "Expected 'Article Title' or 'Test Article', got '{}'",
            article.title
        );
        assert!(article.text_content.contains("main article content"));
        assert!(!article.text_content.contains("Site Navigation"));
        assert!(!article.text_content.contains("Copyright"));
    }

    // <main> is used as the container when no <article> is present.
    #[test]
    fn extracts_from_main_element() {
        let html = r"
<html>
<head><title>Page Title</title></head>
<body>
<header>Header Content</header>
<main>
<h1>Main Content Title</h1>
<p>This is the main content area with substantial text to pass the length threshold.</p>
<p>Multiple paragraphs ensure proper detection.</p>
</main>
<aside>Sidebar</aside>
</body>
</html>
";
        let article = extract_article(html, "https://example.com/page").unwrap();
        assert!(!article.title.is_empty());
        assert!(article.text_content.contains("main content area"));
        assert!(!article.text_content.contains("Sidebar"));
    }

    // Density fallback must skip divs whose class looks like boilerplate.
    #[test]
    fn filters_unlikely_candidates() {
        let html = r#"
<html>
<body>
<div class="navigation">Nav Links</div>
<div class="sidebar">Sidebar Content</div>
<div class="content">
<h1>Real Article</h1>
<p>This is the actual article content with enough text to be recognized as the main content.</p>
<p>It should be extracted while filtering out navigation and sidebar.</p>
</div>
<div class="footer">Footer</div>
</body>
</html>
"#;
        let article = extract_article(html, "https://example.com/article").unwrap();
        assert!(article.text_content.contains("actual article content"));
        assert!(!article.text_content.contains("Nav Links"));
        assert!(!article.text_content.contains("Sidebar"));
    }

    // Pages with too little text (below the 100-byte threshold) yield None.
    #[test]
    fn returns_none_for_non_article_pages() {
        let html = r"
<html>
<body>
<div>Short</div>
<div>Text</div>
</body>
</html>
";
        let result = extract_article(html, "https://example.com/");
        assert!(result.is_none());
    }

    // An <h1> inside the content is preferred over the <title> tag, but
    // either extraction path may win.
    #[test]
    fn extracts_title_from_h1() {
        let html = r"
<html>
<head><title>Page Title in Head</title></head>
<body>
<article>
<h1>Article Heading</h1>
<p>This is article content with sufficient length to pass extraction thresholds.</p>
<p>Multiple paragraphs ensure proper handling.</p>
</article>
</body>
</html>
";
        let article = extract_article(html, "https://example.com/article").unwrap();
        assert!(
            article.title == "Article Heading" || article.title == "Page Title in Head",
            "Expected 'Article Heading' or 'Page Title in Head', got '{}'",
            article.title
        );
    }

    // With no <h1>, the <title> tag is the title fallback.
    #[test]
    fn extracts_title_from_title_tag_fallback() {
        let html = r"
<html>
<head><title>Page Title</title></head>
<body>
<article>
<p>Article content without an h1 but with enough text to be extracted as content.</p>
<p>Multiple paragraphs present for proper extraction.</p>
</article>
</body>
</html>
";
        let article = extract_article(html, "https://example.com/article").unwrap();
        assert_eq!(article.title, "Page Title");
    }

    // Excerpt is non-empty, capped at 200, and taken from the text's start.
    #[test]
    fn creates_excerpt() {
        let html = r"
<html>
<body>
<article>
<p>This is a long article with substantial content that should be extracted. The excerpt should be created from the beginning of this text.</p>
<p>More content follows in subsequent paragraphs to ensure proper extraction.</p>
</article>
</body>
</html>
";
        let article = extract_article(html, "https://example.com/article").unwrap();
        assert!(!article.excerpt.is_empty());
        assert!(article.excerpt.len() <= 200);
        assert!(article.excerpt.starts_with("This is a long article"));
    }

    // strip_html_tags removes markup and keeps the visible text.
    #[test]
    fn strips_html_tags() {
        let html = "<p>Hello <strong>world</strong> with <a href='#'>links</a></p>";
        let text = strip_html_tags(html);
        assert_eq!(text, "Hello world with links");
        assert!(!text.contains('<'));
        assert!(!text.contains('>'));
    }

    // Regression test: real-world Ghost blog structure with an <ol> list;
    // all list items and the conclusion paragraph must survive extraction.
    #[test]
    fn extracts_full_ghost_blog_with_ordered_list() {
        let html = r#"
<html>
<head><title>Porting Software</title></head>
<body>
<header><nav>Site Nav</nav></header>
<main class="site-main">
<article class="gh-article post tag-ai">
<header class="gh-article-header">
<h1 class="gh-article-title">porting software has been trivial</h1>
</header>
<div class="gh-content gh-canvas">
<p>This one is short and sweet. if you want to port a codebase from one language to another here's the approach:</p>
<ol>
<li>Run a ralph loop which compresses all tests into specs which looks similar to study every file in tests using separate subagents and document in specs and link the implementation as citations in the specification</li>
<li>Then do a separate Ralph loop for all product functionality ensuring there are citations to the specification. Study every file in src using separate subagents per file and link the implementation as citations in the specification</li>
<li>Once you have that within the same repo run a Ralph loop to create a TODO file and then execute a classic ralph doing just one thing and the most important thing per loop. Remind the agent that it can study the specifications and follow the citations to reference source code.</li>
<li>For best outcomes you wanna configure your target language to have strict compilation</li>
</ol>
<p>The key theory here is usage of citations in the specifications which tease the file_read tool to study the original implementation during stage 3. Reducing stage 1 and stage 2 to specs is the precursor which transforms a code base into high level PRDs without coupling the implementation from the source language.</p>
</div>
</article>
</main>
<section class="newsletter-signup"><p>Subscribe</p></section>
<footer>Copyright</footer>
</body>
</html>
"#;
        let article = extract_article(html, "https://ghuntley.com/porting/").unwrap();
        assert!(
            article
                .text_content
                .contains("high level PRDs without coupling"),
            "Missing conclusion paragraph. text_content ({} chars): {}",
            article.text_content.len(),
            &article.text_content[..article.text_content.len().min(500)]
        );
        assert!(
            article.text_content.contains("Ralph loop"),
            "Missing list content"
        );
        assert!(
            article.text_content.len() > 800,
            "Text too short: {} chars",
            article.text_content.len()
        );
    }

    // Direct unit test of the boilerplate heuristic on isolated <div>s.
    #[test]
    fn test_is_unlikely_candidate_detects_boilerplate() {
        let html = r#"<div class="navigation">Nav</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(is_unlikely_candidate(&element));
        let html = r#"<div class="sidebar">Side</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(is_unlikely_candidate(&element));
        let html = r#"<div class="content">Content</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(!is_unlikely_candidate(&element));
    }
}