use crate::types::{uri::raw::RawUri, FileType, InputContent};
mod html5ever;
mod html5gum;
mod markdown;
mod plaintext;
use markdown::extract_markdown;
use plaintext::extract_plaintext;
/// Returns `true` if the given HTML element name is one whose contents are
/// treated as verbatim text (i.e. not parsed for nested markup).
pub(crate) fn is_verbatim_elem(name: &str) -> bool {
    // Element names are compared case-sensitively, exactly as before.
    const VERBATIM_ELEMENTS: [&str; 7] = [
        "pre",
        "code",
        "textarea",
        "samp",
        "xmp",
        "plaintext",
        "listing",
    ];
    VERBATIM_ELEMENTS.contains(&name)
}
/// Configures how links are extracted from an input.
///
/// Construct with [`Extractor::new`] and call [`Extractor::extract`].
#[derive(Default, Debug, Clone, Copy)]
pub struct Extractor {
    // If `true`, HTML input is parsed with the `html5ever` backend;
    // otherwise the `html5gum` backend is used. Has no effect on
    // Markdown or plaintext input.
    use_html5ever: bool,
    // Forwarded to the Markdown and HTML extractors to control whether
    // links inside verbatim regions are included. Not consulted for
    // plaintext input.
    include_verbatim: bool,
}
impl Extractor {
    /// Creates a new `Extractor`.
    ///
    /// `use_html5ever` selects the HTML parser backend (`html5ever` when
    /// `true`, `html5gum` otherwise); `include_verbatim` is forwarded to the
    /// Markdown and HTML extractors.
    #[must_use]
    pub const fn new(use_html5ever: bool, include_verbatim: bool) -> Self {
        Self {
            use_html5ever,
            include_verbatim,
        }
    }

    /// Extracts all raw URIs from the given input, dispatching on its
    /// file type.
    #[must_use]
    pub fn extract(&self, input_content: &InputContent) -> Vec<RawUri> {
        let content = &input_content.content;
        match input_content.file_type {
            FileType::Markdown => extract_markdown(content, self.include_verbatim),
            // HTML: pick the parser backend selected at construction time.
            FileType::Html if self.use_html5ever => {
                html5ever::extract_html(content, self.include_verbatim)
            }
            FileType::Html => html5gum::extract_html(content, self.include_verbatim),
            FileType::Plaintext => extract_plaintext(content),
        }
    }
}
#[cfg(test)]
mod tests {
    use reqwest::Url;
    use std::{collections::HashSet, path::Path};

    use super::*;
    use crate::{
        helpers::url::find_links,
        test_utils::{load_fixture, mail, website},
        types::{FileType, InputContent, InputSource},
        Uri,
    };

    /// Extracts URIs from `input` twice — once per HTML parser backend —
    /// asserts both backends agree, and returns the extracted set.
    /// `include_verbatim` is `false` for both runs.
    fn extract_uris(input: &str, file_type: FileType) -> HashSet<Uri> {
        let input_content = InputContent::from_string(input, file_type);

        // Run with the html5gum backend (use_html5ever = false).
        let extractor = Extractor::new(false, false);
        let uris_html5gum = extractor
            .extract(&input_content)
            .into_iter()
            .filter_map(|raw_uri| Uri::try_from(raw_uri).ok())
            .collect();

        // Run with the html5ever backend (use_html5ever = true).
        let extractor = Extractor::new(true, false);
        let uris_html5ever = extractor
            .extract(&input_content)
            .into_iter()
            .filter_map(|raw_uri| Uri::try_from(raw_uri).ok())
            .collect();

        // Both backends must produce identical results.
        assert_eq!(uris_html5gum, uris_html5ever);
        uris_html5gum
    }

    // File-type detection from paths and extensions.
    #[test]
    fn test_file_type() {
        assert_eq!(FileType::from(Path::new("/")), FileType::Plaintext);
        assert_eq!(FileType::from("test.md"), FileType::Markdown);
        assert_eq!(FileType::from("test.markdown"), FileType::Markdown);
        assert_eq!(FileType::from("test.html"), FileType::Html);
        assert_eq!(FileType::from("test.txt"), FileType::Plaintext);
        assert_eq!(FileType::from("test.something"), FileType::Plaintext);
        assert_eq!(
            FileType::from("/absolute/path/to/test.something"),
            FileType::Plaintext
        );
    }

    // Fragment-only links (`#lol`) are not extracted as URIs.
    #[test]
    fn test_skip_markdown_anchors() {
        let links = extract_uris("This is [a test](#lol).", FileType::Markdown);
        assert!(links.is_empty());
    }

    // Relative links (`./internal`) are not extracted as URIs.
    #[test]
    fn test_skip_markdown_internal_urls() {
        let links = extract_uris("This is [a test](./internal).", FileType::Markdown);
        assert!(links.is_empty());
    }

    // `mailto:` links are extracted as mail URIs.
    #[test]
    fn test_skip_markdown_email() {
        let input = "Get in touch - [Contact Us](mailto:test@test.com)";
        let links = extract_uris(input, FileType::Markdown);
        let expected = IntoIterator::into_iter([mail("test@test.com")]).collect::<HashSet<Uri>>();
        assert_eq!(links, expected);
    }

    // Root-relative links (`/internal`) are not extracted as URIs.
    #[test]
    fn relative_urls() {
        let links = extract_uris("This is [a test](/internal).", FileType::Markdown);
        assert!(links.is_empty());
    }

    // Plaintext extraction finds bare URLs and email addresses.
    #[test]
    fn test_non_markdown_links() {
        let input =
            "https://endler.dev and https://hello-rust.show/foo/bar?lol=1 at test@example.com";
        let links: HashSet<Uri> = extract_uris(input, FileType::Plaintext);
        let expected = IntoIterator::into_iter([
            website("https://endler.dev"),
            website("https://hello-rust.show/foo/bar?lol=1"),
            mail("test@example.com"),
        ])
        .collect::<HashSet<Uri>>();
        assert_eq!(links, expected);
    }

    // Markdown-escaped parentheses (`\(`, `\)`) inside a URL.
    #[test]
    fn test_md_escape() {
        let input = r#"http://msdn.microsoft.com/library/ie/ms535874\(v=vs.85\).aspx"#;
        let links: Vec<_> = find_links(input).collect();
        let expected = "http://msdn.microsoft.com/library/ie/ms535874(v=vs.85).aspx)";
        // NOTE(review): the boolean result of `matches!` is discarded, so this
        // test currently asserts nothing and passes unconditionally. It should
        // presumably be wrapped in `assert!(...)`; before doing so, confirm
        // the `expected` value — the trailing `)` looks like a typo that the
        // missing assertion has been masking. Verify against `find_links`.
        matches!(&links[..], [link] if link.as_str() == expected);
    }

    // HTML5 fixture that is not valid XML still yields all expected links.
    #[test]
    fn test_extract_html5_not_valid_xml() {
        let input = load_fixture("TEST_HTML5.html");
        let links = extract_uris(&input, FileType::Html);
        let expected_links = IntoIterator::into_iter([
            website("https://example.com/head/home"),
            website("https://example.com/css/style_full_url.css"),
            website("https://example.com/body/a"),
            website("https://example.com/body/div_empty_a"),
        ])
        .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // Relative URLs in HTML are extracted as raw text (not resolved here);
    // checked for both parser backends.
    #[test]
    fn test_extract_relative_url() {
        let source = InputSource::RemoteUrl(Box::new(
            Url::parse("https://example.com/some-post").unwrap(),
        ));
        // NOTE: the raw string below is runtime data; its exact whitespace
        // is part of the test input and must not be reformatted.
        let contents = r#"<html>
<div class="row">
<a href="https://github.com/lycheeverse/lychee/">Github</a>
<a href="/about">About</a>
</div>
</html>"#;
        let input_content = &InputContent {
            source,
            file_type: FileType::Html,
            content: contents.to_string(),
        };
        for use_html5ever in [true, false] {
            let extractor = Extractor::new(use_html5ever, false);
            let links = extractor.extract(input_content);
            let urls = links
                .into_iter()
                .map(|raw_uri| raw_uri.text)
                .collect::<HashSet<_>>();
            let expected_urls = IntoIterator::into_iter([
                String::from("https://github.com/lycheeverse/lychee/"),
                String::from("/about"),
            ])
            .collect::<HashSet<_>>();
            assert_eq!(urls, expected_urls);
        }
    }

    // A lowercase `<!doctype html>` is handled like the uppercase form.
    #[test]
    fn test_extract_html5_lowercase_doctype() {
        let input = load_fixture("TEST_HTML5_LOWERCASE_DOCTYPE.html");
        let links = extract_uris(&input, FileType::Html);
        let expected_links = IntoIterator::into_iter([website("https://example.com/body/a")])
            .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // Minified HTML (no whitespace between tags) is extracted correctly.
    #[test]
    fn test_extract_html5_minified() {
        let input = load_fixture("TEST_HTML5_MINIFIED.html");
        let links = extract_uris(&input, FileType::Html);
        let expected_links = IntoIterator::into_iter([
            website("https://example.com/"),
            website("https://example.com/favicon.ico"),
            website("https://fonts.externalsite.com"),
            website("https://example.com/docs/"),
            website("https://example.com/forum"),
        ])
        .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // Malformed markup: only the valid link in the fixture is extracted.
    #[test]
    fn test_extract_html5_malformed() {
        let input = load_fixture("TEST_HTML5_MALFORMED_LINKS.html");
        let links = extract_uris(&input, FileType::Html);
        let expected_links = IntoIterator::into_iter([website("https://example.com/valid")])
            .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // Links in custom (non-standard) elements and attributes are extracted.
    #[test]
    fn test_extract_html5_custom_elements() {
        let input = load_fixture("TEST_HTML5_CUSTOM_ELEMENTS.html");
        let links = extract_uris(&input, FileType::Html);
        let expected_links = IntoIterator::into_iter([
            website("https://example.com/some-weird-element"),
            website("https://example.com/even-weirder-src"),
            website("https://example.com/even-weirder-href"),
            website("https://example.com/citations"),
        ])
        .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // `@` in a URL path must not cause it to be misparsed as an email.
    #[test]
    fn test_extract_urls_with_at_sign_properly() {
        let input = "https://example.com/@test/test http://otherdomain.com/test/@test".to_string();
        let links = extract_uris(&input, FileType::Plaintext);
        let expected_links = IntoIterator::into_iter([
            website("https://example.com/@test/test"),
            website("http://otherdomain.com/test/@test"),
        ])
        .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }

    // A trailing newline is not included in the extracted URL.
    #[test]
    fn test_extract_link_at_end_of_line() {
        let input = "https://www.apache.org/licenses/LICENSE-2.0\n";
        let links = extract_uris(input, FileType::Plaintext);
        let expected_links =
            IntoIterator::into_iter([website("https://www.apache.org/licenses/LICENSE-2.0")])
                .collect::<HashSet<Uri>>();
        assert_eq!(links, expected_links);
    }
}