1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::Duration;
use serde::{Deserialize, Serialize};
use super::AssetCategory;
/// Metadata about an LLM extraction pass.
///
/// Deserialization rejects unrecognized keys (`deny_unknown_fields`).
/// All fields fall back to their `Default` values (`None` / `0`).
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ExtractionMeta {
    /// Estimated cost of the LLM call in USD.
    pub cost: Option<f64>,
    /// Number of prompt (input) tokens consumed.
    pub prompt_tokens: Option<u64>,
    /// Number of completion (output) tokens generated.
    pub completion_tokens: Option<u64>,
    /// The model identifier used for extraction.
    pub model: Option<String>,
    /// Number of content chunks sent to the LLM.
    pub chunks_processed: usize,
}
/// When to use the headless browser fallback.
///
/// Serialized in `snake_case` (`"auto"`, `"always"`, `"never"`).
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BrowserMode {
    /// Automatically detect when JS rendering is needed and fall back to browser.
    #[default]
    Auto,
    /// Always use the browser for every request.
    Always,
    /// Never use the browser fallback.
    Never,
}
/// Wait strategy for browser page rendering.
///
/// Serialized in `snake_case` (`"network_idle"`, `"selector"`, `"fixed"`).
/// `Selector` requires `BrowserConfig::wait_selector` to be set
/// (enforced by `CrawlConfig::validate`).
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
pub enum BrowserWait {
    /// Wait until network activity is idle.
    #[default]
    NetworkIdle,
    /// Wait for a specific CSS selector to appear in the DOM.
    Selector,
    /// Wait for a fixed duration after navigation.
    Fixed,
}
/// Serde helpers: (de)serialize a `Duration` as an integer millisecond count.
pub(crate) mod duration_ms {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use std::time::Duration;

    /// Serialize as `u64` milliseconds.
    ///
    /// `Duration::as_millis` returns `u128`, which not all serde data
    /// formats can represent (serde's default `serialize_u128` errors), so
    /// the value is saturated into `u64` — consistent with the truncation
    /// performed by `option_duration_ms::serialize`.
    pub fn serialize<S: Serializer>(d: &Duration, s: S) -> Result<S::Ok, S::Error> {
        u64::try_from(d.as_millis())
            .unwrap_or(u64::MAX)
            .serialize(s)
    }

    /// Deserialize from a `u64` millisecond count.
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Duration, D::Error> {
        let ms = u64::deserialize(d)?;
        Ok(Duration::from_millis(ms))
    }
}
/// Serde helpers: (de)serialize an `Option<Duration>` as an optional
/// integer millisecond count (`null`/absent maps to `None`).
pub(crate) mod option_duration_ms {
    use serde::{Deserialize, Deserializer, Serialize, Serializer};
    use std::time::Duration;

    /// Serialize as `Some(ms as u64)` or `None`.
    pub fn serialize<S: Serializer>(d: &Option<Duration>, s: S) -> Result<S::Ok, S::Error> {
        let ms: Option<u64> = d.as_ref().map(|dur| dur.as_millis() as u64);
        ms.serialize(s)
    }

    /// Deserialize from an optional `u64` millisecond count.
    pub fn deserialize<'de, D: Deserializer<'de>>(d: D) -> Result<Option<Duration>, D::Error> {
        Option::<u64>::deserialize(d).map(|ms| ms.map(Duration::from_millis))
    }
}
/// Proxy configuration for HTTP requests.
///
/// NOTE(review): credentials are held as plain `String`s and are included
/// in serialized output — avoid logging or persisting this struct verbatim.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct ProxyConfig {
    /// Proxy URL (e.g. "http://proxy:8080", "socks5://proxy:1080").
    pub url: String,
    /// Optional username for proxy authentication.
    pub username: Option<String>,
    /// Optional password for proxy authentication.
    pub password: Option<String>,
}
/// Authentication configuration.
///
/// Internally tagged on the wire: a `"type"` field selects the variant
/// (`"basic"`, `"bearer"`, or `"header"`); unknown fields are rejected.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields, tag = "type")]
pub enum AuthConfig {
    /// HTTP Basic authentication.
    #[serde(rename = "basic")]
    Basic { username: String, password: String },
    /// Bearer token authentication.
    #[serde(rename = "bearer")]
    Bearer { token: String },
    /// Custom authentication header.
    #[serde(rename = "header")]
    Header { name: String, value: String },
}
impl Default for AuthConfig {
fn default() -> Self {
Self::Basic {
username: String::new(),
password: String::new(),
}
}
}
/// Content extraction and conversion configuration.
///
/// Controls how HTML is converted to the output format. Uses
/// html-to-markdown-rs as the conversion engine for all formats
/// (markdown, plain text, djot).
///
/// Unlike most config types in this module, this struct does NOT use
/// `deny_unknown_fields`, so unrecognized keys are silently ignored;
/// `#[serde(default)]` lets any field be omitted.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(default)]
pub struct ContentConfig {
    /// Output format: `"markdown"` (default), `"plain"`, `"djot"`.
    pub output_format: String,
    /// Preprocessing aggressiveness: `"minimal"`, `"standard"` (default), `"aggressive"`.
    ///
    /// - Minimal: only scripts/styles removed.
    /// - Standard: also removes nav, nav-hinted headers/footers/asides, forms.
    /// - Aggressive: removes all footers/asides unconditionally.
    pub preprocessing_preset: String,
    /// Remove navigation elements (nav, breadcrumbs, menus). Default: `true`.
    pub remove_navigation: bool,
    /// Remove form elements. Default: `true`.
    pub remove_forms: bool,
    /// HTML tag names to strip (render children only, remove the tag wrapper).
    /// Default: `["noscript"]`.
    #[serde(default)]
    pub strip_tags: Vec<String>,
    /// HTML tag names to preserve as raw HTML in output.
    #[serde(default)]
    pub preserve_tags: Vec<String>,
    /// CSS selectors for elements to exclude entirely (element + all content).
    ///
    /// Unlike `strip_tags` (which removes the wrapper but keeps children),
    /// excluded elements and all descendants are dropped. Supports CSS selectors:
    /// `.class`, `#id`, `[attribute]`, compound selectors.
    ///
    /// Example: `[".cookie-banner", "#ad-container", "[role='complementary']"]`
    #[serde(default)]
    pub exclude_selectors: Vec<String>,
    /// Skip image elements in output. Default: `false`.
    pub skip_images: bool,
    /// Max DOM traversal depth. Prevents stack overflow on deeply nested HTML.
    pub max_depth: Option<usize>,
    /// Enable line wrapping. Default: `false`.
    pub wrap: bool,
    /// Wrap width when `wrap` is enabled. Default: `80`.
    pub wrap_width: usize,
    /// Include document structure tree in output. Default: `true`.
    pub include_document_structure: bool,
}
impl Default for ContentConfig {
fn default() -> Self {
Self {
output_format: "markdown".to_owned(),
preprocessing_preset: "standard".to_owned(),
remove_navigation: true,
remove_forms: true,
strip_tags: vec!["noscript".to_owned()],
preserve_tags: Vec::new(),
exclude_selectors: Vec::new(),
skip_images: false,
max_depth: None,
wrap: false,
wrap_width: 80,
include_document_structure: true,
}
}
}
/// Browser fallback configuration.
///
/// Unknown keys are rejected (`deny_unknown_fields`); any field may be
/// omitted and falls back to `BrowserConfig::default()`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields, default)]
pub struct BrowserConfig {
    /// When to use the headless browser fallback.
    pub mode: BrowserMode,
    /// CDP WebSocket endpoint for connecting to an external browser instance.
    pub endpoint: Option<String>,
    /// Timeout for browser page load and rendering (in milliseconds when serialized).
    #[serde(with = "duration_ms")]
    pub timeout: Duration,
    /// Wait strategy after browser navigation.
    pub wait: BrowserWait,
    /// CSS selector to wait for when `wait` is `Selector`.
    /// Required in that case — checked by `CrawlConfig::validate`.
    pub wait_selector: Option<String>,
    /// Extra time to wait after the wait condition is met.
    #[serde(default, with = "option_duration_ms")]
    pub extra_wait: Option<Duration>,
}
impl Default for BrowserConfig {
fn default() -> Self {
Self {
mode: BrowserMode::Auto,
endpoint: None,
timeout: Duration::from_secs(30),
wait: BrowserWait::default(),
wait_selector: None,
extra_wait: None,
}
}
}
/// Configuration for crawl, scrape, and map operations.
///
/// Unknown keys are rejected (`deny_unknown_fields`); any field may be
/// omitted and falls back to `CrawlConfig::default()`. Call
/// [`CrawlConfig::validate`] after construction to catch invalid values.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields, default)]
pub struct CrawlConfig {
    /// Maximum crawl depth (number of link hops from the start URL).
    pub max_depth: Option<usize>,
    /// Maximum number of pages to crawl.
    pub max_pages: Option<usize>,
    /// Maximum number of concurrent requests. Must be > 0 when set.
    pub max_concurrent: Option<usize>,
    /// Whether to respect robots.txt directives.
    pub respect_robots_txt: bool,
    /// Custom user-agent string.
    pub user_agent: Option<String>,
    /// Whether to restrict crawling to the same domain.
    pub stay_on_domain: bool,
    /// Whether to allow subdomains when `stay_on_domain` is true.
    pub allow_subdomains: bool,
    /// Regex patterns for paths to include during crawling.
    #[serde(default)]
    pub include_paths: Vec<String>,
    /// Regex patterns for paths to exclude during crawling.
    #[serde(default)]
    pub exclude_paths: Vec<String>,
    /// Custom HTTP headers to send with each request.
    #[serde(default)]
    pub custom_headers: HashMap<String, String>,
    /// Timeout for individual HTTP requests (in milliseconds when serialized).
    /// Must be non-zero (checked by `validate`).
    #[serde(with = "duration_ms")]
    pub request_timeout: Duration,
    /// Per-domain rate limit in milliseconds. When set, enforces a minimum delay
    /// between requests to the same domain. Defaults to 200ms when `None`.
    pub rate_limit_ms: Option<u64>,
    /// Maximum number of redirects to follow (capped at 100 by `validate`).
    pub max_redirects: usize,
    /// Number of retry attempts for failed requests.
    pub retry_count: usize,
    /// HTTP status codes that should trigger a retry (each must be 100–599).
    #[serde(default)]
    pub retry_codes: Vec<u16>,
    /// Whether to enable cookie handling.
    pub cookies_enabled: bool,
    /// Authentication configuration.
    pub auth: Option<AuthConfig>,
    /// Maximum response body size in bytes.
    pub max_body_size: Option<usize>,
    /// CSS selectors for tags to remove from HTML before processing.
    #[serde(default)]
    pub remove_tags: Vec<String>,
    /// Content extraction and conversion configuration.
    #[serde(default)]
    pub content: ContentConfig,
    /// Maximum number of URLs to return from a map operation.
    pub map_limit: Option<usize>,
    /// Search filter for map results (case-insensitive substring match on URLs).
    pub map_search: Option<String>,
    /// Whether to download assets (CSS, JS, images, etc.) from the page.
    pub download_assets: bool,
    /// Filter for asset categories to download.
    #[serde(default)]
    pub asset_types: Vec<AssetCategory>,
    /// Maximum size in bytes for individual asset downloads.
    pub max_asset_size: Option<usize>,
    /// Browser configuration.
    #[serde(default)]
    pub browser: BrowserConfig,
    /// Proxy configuration for HTTP requests.
    pub proxy: Option<ProxyConfig>,
    /// List of user-agent strings for rotation. If non-empty, overrides `user_agent`.
    #[serde(default)]
    pub user_agents: Vec<String>,
    /// Whether to capture a screenshot when using the browser.
    pub capture_screenshot: bool,
    /// Whether to download non-HTML documents (PDF, DOCX, images, code, etc.) instead of skipping them.
    pub download_documents: bool,
    /// Maximum size in bytes for document downloads. Defaults to 50 MB.
    pub document_max_size: Option<usize>,
    /// Allowlist of MIME types to download. If empty, uses built-in defaults.
    #[serde(default)]
    pub document_mime_types: Vec<String>,
    /// Path to write WARC output. If `None`, WARC output is disabled.
    pub warc_output: Option<PathBuf>,
    /// Named browser profile for persistent sessions (cookies, localStorage).
    pub browser_profile: Option<String>,
    /// Whether to save changes back to the browser profile on exit.
    pub save_browser_profile: bool,
    /// Shared browser pool for reusing Chrome across requests (not serializable).
    /// Skipped by serde; only present when the `browser` feature is enabled.
    #[cfg(feature = "browser")]
    #[serde(skip)]
    pub browser_pool: Option<std::sync::Arc<crate::browser_pool::BrowserPool>>,
}
impl Default for CrawlConfig {
fn default() -> Self {
Self {
max_depth: None,
max_pages: None,
max_concurrent: None,
respect_robots_txt: false,
user_agent: None,
stay_on_domain: false,
allow_subdomains: false,
include_paths: Vec::new(),
exclude_paths: Vec::new(),
custom_headers: HashMap::new(),
request_timeout: Duration::from_secs(30),
rate_limit_ms: None,
max_redirects: 10,
retry_count: 0,
retry_codes: Vec::new(),
cookies_enabled: false,
auth: None,
max_body_size: None,
remove_tags: Vec::new(),
content: ContentConfig::default(),
map_limit: None,
map_search: None,
download_assets: false,
asset_types: Vec::new(),
max_asset_size: None,
browser: BrowserConfig::default(),
proxy: None,
user_agents: Vec::new(),
capture_screenshot: false,
download_documents: true,
document_max_size: Some(50 * 1024 * 1024), // 50 MB
document_mime_types: Vec::new(),
warc_output: None,
browser_profile: None,
save_browser_profile: false,
#[cfg(feature = "browser")]
browser_pool: None,
}
}
}
impl CrawlConfig {
/// Validate the configuration, returning an error if any values are invalid.
pub fn validate(&self) -> Result<(), crate::error::CrawlError> {
use crate::error::CrawlError;
if let Some(0) = self.max_concurrent {
return Err(CrawlError::InvalidConfig("max_concurrent must be > 0".into()));
}
if self.browser.wait == BrowserWait::Selector && self.browser.wait_selector.is_none() {
return Err(CrawlError::InvalidConfig(
"browser.wait_selector required when browser.wait is Selector".into(),
));
}
if let Some(max_pages) = self.max_pages
&& max_pages == 0
{
return Err(CrawlError::InvalidConfig("max_pages must be > 0".into()));
}
if self.max_redirects > 100 {
return Err(CrawlError::InvalidConfig("max_redirects must be <= 100".into()));
}
for pattern in &self.include_paths {
regex::Regex::new(pattern)
.map_err(|e| CrawlError::InvalidConfig(format!("invalid include_path regex '{pattern}': {e}")))?;
}
for pattern in &self.exclude_paths {
regex::Regex::new(pattern)
.map_err(|e| CrawlError::InvalidConfig(format!("invalid exclude_path regex '{pattern}': {e}")))?;
}
for &code in &self.retry_codes {
if !(100..=599).contains(&code) {
return Err(CrawlError::InvalidConfig(format!("invalid retry code: {code}")));
}
}
if self.request_timeout.is_zero() {
return Err(CrawlError::InvalidConfig("request_timeout must be > 0".into()));
}
Ok(())
}
}