nab 0.7.1

Token-optimized HTTP client for LLMs — fetches any URL as clean markdown
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
//! Mozilla Readability article extraction.
//!
//! Extracts the main article content from HTML pages while filtering out
//! navigation, sidebars, footers, ads, and other boilerplate.
//!
//! # Strategy
//!
//! 1. Parse HTML into DOM with `scraper`
//! 2. Apply readability algorithm (score nodes by text density, remove unlikely candidates)
//! 3. Extract article metadata (title, excerpt)
//! 4. Return clean HTML for conversion to markdown
//!
//! # Fallback
//!
//! If extraction fails (e.g., non-article pages), returns `None` and the caller
//! falls back to raw `html2md` conversion.

use std::sync::LazyLock;

use scraper::{Html, Selector};

// CSS selectors compiled once on first use and shared process-wide.
// Each pattern is a static, known-good literal, so `Selector::parse`
// cannot fail at runtime — `expect` documents that invariant.
static H1_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("h1").expect("static h1 selector"));
static ARTICLE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("article").expect("static article selector"));
static MAIN_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("main").expect("static main selector"));
// Candidate containers for the text-density fallback scorer.
static DENSITY_CANDIDATE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("div, section").expect("static density selector"));
static TITLE_SELECTOR: LazyLock<Selector> =
    LazyLock::new(|| Selector::parse("title").expect("static title selector"));
static OG_TITLE_SELECTOR: LazyLock<Selector> = LazyLock::new(|| {
    Selector::parse("meta[property='og:title']").expect("static og:title selector")
});

/// Extracted article content from HTML.
#[derive(Debug, Clone)]
pub struct Article {
    /// Article title (from <title>, <h1>, or metadata; "Untitled" if none found).
    pub title: String,
    /// Main article content as clean HTML (the extracted element's markup).
    pub content_html: String,
    /// Plain text excerpt (~200 chars, taken from the start of the text).
    pub excerpt: String,
    /// Text-only version of the content, whitespace-collapsed.
    pub text_content: String,
}

/// Extract article content using readability algorithm.
///
/// Returns `Some(Article)` if extraction succeeds, `None` if the page
/// doesn't look like an article (e.g., homepage, search results).
/// Extract article content using readability algorithm.
///
/// Returns `Some(Article)` if extraction succeeds, `None` if the page
/// doesn't look like an article (e.g., homepage, search results).
pub fn extract_article(html: &str, url: &str) -> Option<Article> {
    let from_readability = extract_with_readability_crate(html, url);
    let from_scraper = extract_with_scraper(html);

    // Prefer whichever path captured more text.  The readability crate
    // sometimes truncates list-heavy articles (e.g. Ghost blogs with
    // <ol>/<li>), so its output is compared against our semantic
    // <article>/<main> extraction and the larger one wins.
    match (from_readability, from_scraper) {
        (Some(r), Some(s)) => {
            tracing::debug!(
                "readability: {} chars, scraper: {} chars",
                r.text_content.len(),
                s.text_content.len()
            );
            // Ties go to the readability result.
            let winner = if s.text_content.len() > r.text_content.len() {
                s
            } else {
                r
            };
            Some(winner)
        }
        // Exactly one path succeeded (or neither did) — pass it through.
        (readability_only, None) => readability_only,
        (None, scraper_only) => scraper_only,
    }
}

/// Extract using the readability crate.
/// Extract using the readability crate.
fn extract_with_readability_crate(html: &str, url: &str) -> Option<Article> {
    // The crate needs a parsed base URL; bail out on malformed input.
    let base_url = url::Url::parse(url).ok()?;
    let product = readability::extractor::extract(&mut html.as_bytes(), &base_url).ok()?;

    // Require at least 100 chars of plain text to count as meaningful.
    let text_content = strip_html_tags(&product.content);
    if text_content.len() < 100 {
        return None;
    }

    // Excerpt = first ~200 chars of the plain text, trimmed.
    let excerpt = text_content
        .chars()
        .take(200)
        .collect::<String>()
        .trim()
        .to_string();

    // An <h1> inside the extracted content beats the page-level title.
    let title = extract_h1_from_html(&product.content).unwrap_or(product.title);

    Some(Article {
        title,
        content_html: product.content,
        excerpt,
        text_content,
    })
}

/// Extract h1 text from HTML fragment.
/// Extract h1 text from HTML fragment.
///
/// Returns `None` when the fragment has no `<h1>` or its text is blank.
fn extract_h1_from_html(html: &str) -> Option<String> {
    let fragment = Html::parse_fragment(html);
    let first_h1 = fragment.select(&H1_SELECTOR).next()?;
    let joined = first_h1.text().collect::<Vec<_>>().join(" ");
    let trimmed = joined.trim();
    (!trimmed.is_empty()).then(|| trimmed.to_string())
}

/// Fallback extraction using scraper and basic heuristics.
///
/// Strategy:
/// 1. Find <article>, <main>, or largest content block by text density
/// 2. Strip <nav>, <header>, <footer>, <aside>, <script>, <style>
/// 3. Score remaining blocks by text-to-tag ratio
/// Fallback extraction using scraper and basic heuristics.
///
/// Strategy:
/// 1. Find <article>, <main>, or largest content block by text density
/// 2. Strip <nav>, <header>, <footer>, <aside>, <script>, <style>
/// 3. Score remaining blocks by text-to-tag ratio
fn extract_with_scraper(html: &str) -> Option<Article> {
    let document = Html::parse_document(html);

    // Semantic HTML5 elements first; if that yields nothing, score
    // <div>/<section> blocks by text density instead.
    try_semantic_extraction(&document).or_else(|| find_main_content_by_density(&document))
}

/// Try extracting from semantic HTML5 elements (<article>, <main>).
/// Try extracting from semantic HTML5 elements (<article>, <main>).
///
/// Tries `<article>` first, then `<main>`. Returns `None` when neither
/// element exists or neither yields enough text.
fn try_semantic_extraction(document: &Html) -> Option<Article> {
    // Fall through to <main> when the <article> element exists but is
    // too short for `extract_from_element` — previously a thin
    // <article> aborted semantic extraction entirely even though a
    // usable <main> was present.
    document
        .select(&ARTICLE_SELECTOR)
        .next()
        .and_then(|article_elem| extract_from_element(article_elem, document))
        .or_else(|| {
            document
                .select(&MAIN_SELECTOR)
                .next()
                .and_then(|main_elem| extract_from_element(main_elem, document))
        })
}

/// Extract article from a specific DOM element.
/// Extract article from a specific DOM element.
///
/// Returns `None` when the element holds fewer than 100 chars of text.
fn extract_from_element(
    element: scraper::element_ref::ElementRef,
    document: &Html,
) -> Option<Article> {
    // Keep the element's serialized markup as the article body.
    let content_html = element.html();

    // Gather text nodes, then collapse all whitespace runs to single spaces.
    let pieces: Vec<&str> = element.text().collect();
    let text_content = pieces
        .join(" ")
        .split_whitespace()
        .collect::<Vec<_>>()
        .join(" ");

    // Too short to be a meaningful article.
    if text_content.len() < 100 {
        return None;
    }

    // Title comes from the surrounding document (<h1>, <title>, og:title).
    let title = extract_title(document);

    // Excerpt = first ~200 chars of the plain text, trimmed.
    let excerpt = text_content
        .chars()
        .take(200)
        .collect::<String>()
        .trim()
        .to_string();

    Some(Article {
        title,
        content_html,
        excerpt,
        text_content,
    })
}

/// Find main content by analyzing text density of all elements.
/// Find main content by analyzing text density of all elements.
///
/// Scores every `<div>` and `<section>` by text-to-HTML ratio and keeps
/// the densest one. Boilerplate-looking elements and blocks with under
/// 100 chars of text are skipped. Returns `None` when no candidate
/// qualifies.
fn find_main_content_by_density(document: &Html) -> Option<Article> {
    // Single pass keeping only the maximum — no need to collect and
    // sort the whole candidate list just to take its first element.
    let (_, element, text) = document
        .select(&DENSITY_CANDIDATE_SELECTOR)
        // Skip elements that look like navigation/boilerplate.
        .filter(|element| !is_unlikely_candidate(element))
        .filter_map(|element| {
            let text = element.text().collect::<Vec<_>>().join(" ");
            if text.len() < 100 {
                return None; // Too short to be the main content.
            }

            // Density = plain-text length / serialized-HTML length.
            let html_len = element.html().len();
            #[allow(clippy::cast_precision_loss)]
            let density = text.len() as f64 / html_len.max(1) as f64;
            Some((density, element, text))
        })
        // NaN densities are impossible here (html_len >= 1), but stay
        // total anyway by treating incomparable pairs as equal.
        .max_by(|a, b| a.0.partial_cmp(&b.0).unwrap_or(std::cmp::Ordering::Equal))?;

    let title = extract_title(document);
    let text_content = text.split_whitespace().collect::<Vec<_>>().join(" ");
    let excerpt = text_content
        .chars()
        .take(200)
        .collect::<String>()
        .trim()
        .to_string();

    Some(Article {
        title,
        content_html: element.html(),
        excerpt,
        text_content,
    })
}

/// Check if an element is unlikely to be main content.
/// Check if an element is unlikely to be main content.
///
/// Looks for boilerplate markers in the element's `class`/`id` attributes.
fn is_unlikely_candidate(element: &scraper::element_ref::ElementRef) -> bool {
    // Substrings commonly found in class/id values of non-content blocks.
    const BOILERPLATE_MARKERS: [&str; 10] = [
        "nav",
        "menu",
        "sidebar",
        "footer",
        "header",
        "advertisement",
        "ad-",
        "social",
        "share",
        "comment",
    ];

    let attrs = element.value();
    let class = attrs.attr("class").unwrap_or("");
    let id = attrs.attr("id").unwrap_or("");
    let combined = format!("{class} {id}").to_lowercase();

    BOILERPLATE_MARKERS
        .iter()
        .any(|marker| combined.contains(marker))
}

/// Extract title from document (`<title>`, `<h1>`, or `OpenGraph`).
fn extract_title(document: &Html) -> String {
    // Try <h1> first
    if let Some(h1) = document.select(&H1_SELECTOR).next() {
        let title = h1.text().collect::<Vec<_>>().join(" ").trim().to_string();
        if !title.is_empty() {
            return title;
        }
    }

    // Try <title>
    if let Some(title) = document.select(&TITLE_SELECTOR).next() {
        let title = title
            .text()
            .collect::<Vec<_>>()
            .join(" ")
            .trim()
            .to_string();
        if !title.is_empty() {
            return title;
        }
    }

    // Try OpenGraph meta tag
    if let Some(og) = document.select(&OG_TITLE_SELECTOR).next()
        && let Some(content) = og.value().attr("content")
    {
        let title = content.trim().to_string();
        if !title.is_empty() {
            return title;
        }
    }

    "Untitled".to_string()
}

/// Strip HTML tags from content to get plain text.
/// Strip HTML tags from content to get plain text.
///
/// Whitespace runs in the result are collapsed to single spaces.
fn strip_html_tags(html: &str) -> String {
    let fragment = Html::parse_fragment(html);
    let text_nodes: Vec<&str> = fragment.root_element().text().collect();
    // Join text nodes, then normalize whitespace.
    text_nodes
        .join(" ")
        .split_whitespace()
        .collect::<Vec<_>>()
        .join(" ")
}

#[cfg(test)]
mod tests {
    use super::*;

    // Test-only selector; production code never queries bare <div>.
    static DIV_SELECTOR: LazyLock<Selector> =
        LazyLock::new(|| Selector::parse("div").expect("static div selector"));

    #[test]
    fn extracts_article_with_semantic_html() {
        let html = r"
            <html>
            <head><title>Test Article</title></head>
            <body>
                <header><nav>Site Navigation</nav></header>
                <main>
                    <article>
                        <h1>Article Title</h1>
                        <p>This is the main article content with enough text to be recognized as an article body.</p>
                        <p>It has multiple paragraphs to ensure proper extraction.</p>
                    </article>
                </main>
                <footer>© 2025 Copyright</footer>
            </body>
            </html>
        ";

        let article = extract_article(html, "https://example.com/article").unwrap();

        // The title may come from <title> or <h1> depending on extraction method
        // Both are acceptable for readability
        assert!(
            article.title == "Article Title" || article.title == "Test Article",
            "Expected 'Article Title' or 'Test Article', got '{}'",
            article.title
        );
        assert!(article.text_content.contains("main article content"));
        // Boilerplate outside <article> must be excluded.
        assert!(!article.text_content.contains("Site Navigation"));
        assert!(!article.text_content.contains("Copyright"));
    }

    #[test]
    fn extracts_from_main_element() {
        // No <article> element: extraction should fall back to <main>.
        let html = r"
            <html>
            <head><title>Page Title</title></head>
            <body>
                <header>Header Content</header>
                <main>
                    <h1>Main Content Title</h1>
                    <p>This is the main content area with substantial text to pass the length threshold.</p>
                    <p>Multiple paragraphs ensure proper detection.</p>
                </main>
                <aside>Sidebar</aside>
            </body>
            </html>
        ";

        let article = extract_article(html, "https://example.com/page").unwrap();
        // Title may come from various sources - just verify content extraction works
        assert!(!article.title.is_empty());
        assert!(article.text_content.contains("main content area"));
        assert!(!article.text_content.contains("Sidebar"));
    }

    #[test]
    fn filters_unlikely_candidates() {
        // No semantic elements: the density scorer must pick the content
        // div and skip nav/sidebar/footer via is_unlikely_candidate.
        let html = r#"
            <html>
            <body>
                <div class="navigation">Nav Links</div>
                <div class="sidebar">Sidebar Content</div>
                <div class="content">
                    <h1>Real Article</h1>
                    <p>This is the actual article content with enough text to be recognized as the main content.</p>
                    <p>It should be extracted while filtering out navigation and sidebar.</p>
                </div>
                <div class="footer">Footer</div>
            </body>
            </html>
        "#;

        let article = extract_article(html, "https://example.com/article").unwrap();
        assert!(article.text_content.contains("actual article content"));
        assert!(!article.text_content.contains("Nav Links"));
        assert!(!article.text_content.contains("Sidebar"));
    }

    #[test]
    fn returns_none_for_non_article_pages() {
        // Total text is well under the 100-char minimum.
        let html = r"
            <html>
            <body>
                <div>Short</div>
                <div>Text</div>
            </body>
            </html>
        ";

        // Should return None for pages without substantial content
        let result = extract_article(html, "https://example.com/");
        assert!(result.is_none());
    }

    #[test]
    fn extracts_title_from_h1() {
        let html = r"
            <html>
            <head><title>Page Title in Head</title></head>
            <body>
                <article>
                    <h1>Article Heading</h1>
                    <p>This is article content with sufficient length to pass extraction thresholds.</p>
                    <p>Multiple paragraphs ensure proper handling.</p>
                </article>
            </body>
            </html>
        ";

        let article = extract_article(html, "https://example.com/article").unwrap();
        // Readability may prefer <title> over <h1> - both are valid
        assert!(
            article.title == "Article Heading" || article.title == "Page Title in Head",
            "Expected 'Article Heading' or 'Page Title in Head', got '{}'",
            article.title
        );
    }

    #[test]
    fn extracts_title_from_title_tag_fallback() {
        // No <h1> anywhere: extract_title must fall back to <title>.
        let html = r"
            <html>
            <head><title>Page Title</title></head>
            <body>
                <article>
                    <p>Article content without an h1 but with enough text to be extracted as content.</p>
                    <p>Multiple paragraphs present for proper extraction.</p>
                </article>
            </body>
            </html>
        ";

        let article = extract_article(html, "https://example.com/article").unwrap();
        assert_eq!(article.title, "Page Title");
    }

    #[test]
    fn creates_excerpt() {
        let html = r"
            <html>
            <body>
                <article>
                    <p>This is a long article with substantial content that should be extracted. The excerpt should be created from the beginning of this text.</p>
                    <p>More content follows in subsequent paragraphs to ensure proper extraction.</p>
                </article>
            </body>
            </html>
        ";

        let article = extract_article(html, "https://example.com/article").unwrap();
        assert!(!article.excerpt.is_empty());
        // Excerpt is capped at ~200 chars and starts at the text's beginning.
        assert!(article.excerpt.len() <= 200);
        assert!(article.excerpt.starts_with("This is a long article"));
    }

    #[test]
    fn strips_html_tags() {
        let html = "<p>Hello <strong>world</strong> with <a href='#'>links</a></p>";
        let text = strip_html_tags(html);
        assert_eq!(text, "Hello world with links");
        assert!(!text.contains('<'));
        assert!(!text.contains('>'));
    }

    #[test]
    fn extracts_full_ghost_blog_with_ordered_list() {
        // Ghost CMS produces <article> with <ol><li> content.
        // html2md truncates ordered list items, so the scraper path
        // (which uses element.text()) must capture the full text.
        let html = r#"
            <html>
            <head><title>Porting Software</title></head>
            <body>
                <header><nav>Site Nav</nav></header>
                <main class="site-main">
                    <article class="gh-article post tag-ai">
                        <header class="gh-article-header">
                            <h1 class="gh-article-title">porting software has been trivial</h1>
                        </header>
                        <div class="gh-content gh-canvas">
                            <p>This one is short and sweet. if you want to port a codebase from one language to another here's the approach:</p>
                            <ol>
                                <li>Run a ralph loop which compresses all tests into specs which looks similar to study every file in tests using separate subagents and document in specs and link the implementation as citations in the specification</li>
                                <li>Then do a separate Ralph loop for all product functionality ensuring there are citations to the specification. Study every file in src using separate subagents per file and link the implementation as citations in the specification</li>
                                <li>Once you have that within the same repo run a Ralph loop to create a TODO file and then execute a classic ralph doing just one thing and the most important thing per loop. Remind the agent that it can study the specifications and follow the citations to reference source code.</li>
                                <li>For best outcomes you wanna configure your target language to have strict compilation</li>
                            </ol>
                            <p>The key theory here is usage of citations in the specifications which tease the file_read tool to study the original implementation during stage 3. Reducing stage 1 and stage 2 to specs is the precursor which transforms a code base into high level PRDs without coupling the implementation from the source language.</p>
                        </div>
                    </article>
                </main>
                <section class="newsletter-signup"><p>Subscribe</p></section>
                <footer>Copyright</footer>
            </body>
            </html>
        "#;

        let article = extract_article(html, "https://ghuntley.com/porting/").unwrap();

        // Must contain the conclusion paragraph (currently truncated by html2md)
        assert!(
            article
                .text_content
                .contains("high level PRDs without coupling"),
            "Missing conclusion paragraph. text_content ({} chars): {}",
            article.text_content.len(),
            &article.text_content[..article.text_content.len().min(500)]
        );
        // Must contain list items
        assert!(
            article.text_content.contains("Ralph loop"),
            "Missing list content"
        );
        // Should be substantial (the full article is ~1200+ chars)
        assert!(
            article.text_content.len() > 800,
            "Text too short: {} chars",
            article.text_content.len()
        );
    }

    #[test]
    fn test_is_unlikely_candidate_detects_boilerplate() {
        // "navigation" contains the "nav" marker.
        let html = r#"<div class="navigation">Nav</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(is_unlikely_candidate(&element));

        let html = r#"<div class="sidebar">Side</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(is_unlikely_candidate(&element));

        // "content" matches no boilerplate marker.
        let html = r#"<div class="content">Content</div>"#;
        let doc = Html::parse_fragment(html);
        let element = doc.select(&DIV_SELECTOR).next().unwrap();
        assert!(!is_unlikely_candidate(&element));
    }
}