//! pivot_pdf — reader.rs

1use std::collections::{HashMap, HashSet};
2use std::io;
3use std::path::Path;
4
5// ── Error type ────────────────────────────────────────────────────────────────
6
/// Errors that can occur when reading a PDF file.
///
/// `Clone` lets callers retain or re-report an error after propagation; `Eq`
/// accompanies `PartialEq` because equality here is total (clippy:
/// `derive_partial_eq_without_eq`). Both additions are backward compatible.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PdfReadError {
    /// The bytes do not start with a valid `%PDF-` header.
    NotAPdf,
    /// The `startxref` keyword or its offset could not be found.
    StartxrefNotFound,
    /// The cross-reference table is missing or could not be parsed.
    MalformedXref,
    /// The trailer dictionary is missing or malformed.
    MalformedTrailer,
    /// The PDF uses a cross-reference stream (PDF 1.5+), which is not yet supported.
    XrefStreamNotSupported,
    /// An object reference could not be resolved (offset out of range or malformed).
    UnresolvableObject(u32),
    /// The page tree structure is invalid (missing /Count or /Pages).
    MalformedPageTree,
    /// An I/O error occurred while opening a file.
    Io(String),
}
27
28impl std::fmt::Display for PdfReadError {
29    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
30        match self {
31            PdfReadError::NotAPdf => write!(f, "not a PDF file"),
32            PdfReadError::StartxrefNotFound => write!(f, "startxref not found"),
33            PdfReadError::MalformedXref => write!(f, "malformed or missing xref table"),
34            PdfReadError::MalformedTrailer => write!(f, "malformed or missing trailer"),
35            PdfReadError::XrefStreamNotSupported => {
36                write!(
37                    f,
38                    "cross-reference streams (PDF 1.5+) are not yet supported"
39                )
40            }
41            PdfReadError::UnresolvableObject(n) => write!(f, "cannot resolve object {}", n),
42            PdfReadError::MalformedPageTree => write!(f, "malformed page tree"),
43            PdfReadError::Io(msg) => write!(f, "I/O error: {}", msg),
44        }
45    }
46}
47
// Marker impl: the `Debug` + `Display` impls above satisfy every default method.
impl std::error::Error for PdfReadError {}
49
50impl From<io::Error> for PdfReadError {
51    fn from(e: io::Error) -> Self {
52        PdfReadError::Io(e.to_string())
53    }
54}
55
56// ── Public API ─────────────────────────────────────────────────────────────────
57
/// Reads an existing PDF file.
///
/// `PdfReader` parses the PDF's cross-reference table and trailer to locate
/// and resolve objects. The raw bytes and xref offset map are retained so that
/// future enhancements (editing, field extraction, merging) can resolve
/// arbitrary objects without re-reading the file.
///
/// # Limitations
/// PDF 1.5+ cross-reference streams are not supported. Files that use them
/// return `PdfReadError::XrefStreamNotSupported`.
pub struct PdfReader {
    /// Raw file bytes retained for object resolution (merging, field extraction, etc.).
    data: Vec<u8>,
    /// Maps each object number to its byte offset in `data`.
    xref: HashMap<u32, usize>,
    /// PDF version string taken from the `%PDF-x.y` header (e.g. `"1.7"`).
    version: String,
    /// Page total read from the root `/Pages` node's `/Count` entry at parse time.
    page_count: usize,
    /// Object number of the document catalog (`/Type /Catalog`).
    catalog_num: u32,
}
78
79impl PdfReader {
80    /// Open a PDF from a file path.
81    pub fn open<P: AsRef<Path>>(path: P) -> Result<Self, PdfReadError> {
82        let data = std::fs::read(path.as_ref())?;
83        Self::from_bytes(data)
84    }
85
86    /// Parse a PDF from raw bytes.
87    pub fn from_bytes(data: Vec<u8>) -> Result<Self, PdfReadError> {
88        let version = parse_version(&data)?;
89        let xref_offset = find_startxref(&data)?;
90        let (xref, root_ref) = parse_xref_and_trailer(&data, xref_offset)?;
91        let page_count = resolve_page_count(&data, &xref, root_ref)?;
92
93        Ok(PdfReader {
94            data,
95            xref,
96            version,
97            page_count,
98            catalog_num: root_ref,
99        })
100    }
101
102    /// Number of pages in the document.
103    pub fn page_count(&self) -> usize {
104        self.page_count
105    }
106
107    /// PDF version string (e.g. `"1.7"`).
108    pub fn pdf_version(&self) -> &str {
109        &self.version
110    }
111
112    // ── Internal infrastructure for Issue 28 (PDF Merge) ───────────────────────
113    // Dead-code warnings are expected until Issue 28 calls these methods.
114    #[allow(dead_code)]
115    /// Return page object numbers in document order by walking the page tree.
116    ///
117    /// Walks the Catalog → Pages tree recursively, collecting the object number
118    /// of each leaf `/Page` node in the order they appear in the document.
119    pub(crate) fn page_object_numbers(&self) -> Result<Vec<u32>, PdfReadError> {
120        let catalog_dict = resolve_dict(&self.data, &self.xref, self.catalog_num)?;
121        let pages_ref = catalog_dict
122            .get("Pages")
123            .ok_or(PdfReadError::MalformedPageTree)?;
124        let pages_num: u32 = pages_ref
125            .parse()
126            .map_err(|_| PdfReadError::MalformedPageTree)?;
127
128        let mut result = Vec::new();
129        walk_page_tree(&self.data, &self.xref, pages_num, &mut result)?;
130        Ok(result)
131    }
132
133    /// Build the transitive closure of all objects reachable from `roots`.
134    ///
135    /// BFS from the seed object numbers, scanning each object's raw bytes for
136    /// indirect references (`N G R`) and adding any newly-seen xref objects to
137    /// the queue. Returns the complete set of object numbers a merge function
138    /// needs to copy.
139    #[allow(dead_code)]
140    pub(crate) fn collect_closure(&self, roots: &[u32]) -> Result<HashSet<u32>, PdfReadError> {
141        let mut visited: HashSet<u32> = HashSet::new();
142        let mut queue: Vec<u32> = roots.to_vec();
143
144        while let Some(obj_num) = queue.pop() {
145            if !visited.insert(obj_num) {
146                continue; // already visited
147            }
148
149            if let Ok(bytes) = self.raw_object_bytes(obj_num) {
150                for r in extract_indirect_refs(bytes) {
151                    if !visited.contains(&r) && self.xref.contains_key(&r) {
152                        queue.push(r);
153                    }
154                }
155            }
156        }
157
158        Ok(visited)
159    }
160
161    #[allow(dead_code)]
162    /// Return the raw bytes of object `obj_num` — from `N G obj` through `endobj`.
163    ///
164    /// The slice is a view into the internal buffer; no allocation occurs.
165    ///
166    /// **Limitation:** locates `endobj` by scanning for the literal bytes. If a
167    /// stream body happens to contain those bytes (e.g. a binary font program
168    /// compressed by an external tool), the returned slice will be truncated. For
169    /// PDFs generated by this library this cannot occur.
170    pub(crate) fn raw_object_bytes(&self, obj_num: u32) -> Result<&[u8], PdfReadError> {
171        let offset = self
172            .xref
173            .get(&obj_num)
174            .copied()
175            .ok_or(PdfReadError::UnresolvableObject(obj_num))?;
176
177        if offset >= self.data.len() {
178            return Err(PdfReadError::UnresolvableObject(obj_num));
179        }
180
181        let slice = &self.data[offset..];
182        let endobj_pos = slice
183            .windows(6)
184            .position(|w| w == b"endobj")
185            .ok_or(PdfReadError::UnresolvableObject(obj_num))?;
186
187        Ok(&slice[..endobj_pos + 6])
188    }
189}
190
191// ── Internal parsing ───────────────────────────────────────────────────────────
192
193/// Extract the PDF version from the `%PDF-x.y` header.
194fn parse_version(data: &[u8]) -> Result<String, PdfReadError> {
195    if data.len() < 8 || !data.starts_with(b"%PDF-") {
196        return Err(PdfReadError::NotAPdf);
197    }
198    // Version is the characters after "%PDF-" up to the first whitespace.
199    let rest = &data[5..];
200    let end = rest
201        .iter()
202        .position(|&b| b == b'\n' || b == b'\r' || b == b' ')
203        .unwrap_or(rest.len());
204    let version = std::str::from_utf8(&rest[..end])
205        .map(|s| s.to_string())
206        .map_err(|_| PdfReadError::NotAPdf)?;
207    Ok(version)
208}
209
210/// Scan backward from the end of the file to find the `startxref` offset.
211///
212/// The PDF spec places `startxref\n{offset}\n%%EOF` near the end of the file.
213/// We search within the last 1024 bytes to handle comments or trailing whitespace.
214fn find_startxref(data: &[u8]) -> Result<usize, PdfReadError> {
215    let search_start = data.len().saturating_sub(1024);
216    let tail = &data[search_start..];
217
218    // Search backward for the keyword "startxref"
219    let keyword = b"startxref";
220    let pos = tail
221        .windows(keyword.len())
222        .rposition(|w| w == keyword)
223        .ok_or(PdfReadError::StartxrefNotFound)?;
224
225    // The offset integer follows on the next line.
226    let after = &tail[pos + keyword.len()..];
227    let offset_str = skip_whitespace_to_token(after).ok_or(PdfReadError::StartxrefNotFound)?;
228    let offset: usize = offset_str
229        .parse()
230        .map_err(|_| PdfReadError::StartxrefNotFound)?;
231
232    if offset >= data.len() {
233        return Err(PdfReadError::StartxrefNotFound);
234    }
235
236    Ok(offset)
237}
238
239/// Parse the xref table starting at `xref_offset` and the following trailer.
240///
241/// Returns `(object_offset_map, root_object_number)`.
242fn parse_xref_and_trailer(
243    data: &[u8],
244    xref_offset: usize,
245) -> Result<(HashMap<u32, usize>, u32), PdfReadError> {
246    if xref_offset >= data.len() {
247        return Err(PdfReadError::MalformedXref);
248    }
249
250    let section = &data[xref_offset..];
251
252    // Check for cross-reference stream (PDF 1.5+): starts with "N 0 obj" not "xref"
253    let trimmed = skip_ascii_whitespace(section);
254    if !trimmed.starts_with(b"xref") {
255        return Err(PdfReadError::XrefStreamNotSupported);
256    }
257
258    let xref = parse_xref_table(section)?;
259    let root = parse_trailer_root(data, xref_offset)?;
260
261    Ok((xref, root))
262}
263
264/// Parse the traditional xref table.
265///
266/// Each subsection has a header line `{first_obj} {count}` followed by
267/// 20-byte fixed-width entries: `{offset:010} {gen:05} {n|f}\r\n`.
268fn parse_xref_table(section: &[u8]) -> Result<HashMap<u32, usize>, PdfReadError> {
269    let mut map = HashMap::new();
270
271    // Skip "xref\n"
272    let rest = skip_ascii_whitespace(consume_token(section, b"xref")?);
273
274    let mut cursor = rest;
275    loop {
276        let trimmed = skip_ascii_whitespace(cursor);
277        // Stop at "trailer" or end of section
278        if trimmed.is_empty() || trimmed.starts_with(b"trailer") {
279            break;
280        }
281
282        // Subsection header: "{first_obj} {count}"
283        let (first_obj_str, after_first) =
284            next_token(trimmed).ok_or(PdfReadError::MalformedXref)?;
285        let first_obj: u32 = first_obj_str
286            .parse()
287            .map_err(|_| PdfReadError::MalformedXref)?;
288
289        let after_first = skip_ascii_whitespace(after_first);
290        let (count_str, after_count) =
291            next_token(after_first).ok_or(PdfReadError::MalformedXref)?;
292        let count: usize = count_str.parse().map_err(|_| PdfReadError::MalformedXref)?;
293
294        // Each entry is exactly 20 bytes: "oooooooooo ggggg n/f\r\n"
295        let entries_start = skip_line(after_count);
296        let entry_size = 20;
297        let entries_bytes = entries_start.len();
298
299        if entries_bytes < count * entry_size {
300            return Err(PdfReadError::MalformedXref);
301        }
302
303        for i in 0..count {
304            let entry = &entries_start[i * entry_size..(i + 1) * entry_size];
305            // Offset: first 10 bytes
306            let offset_bytes = &entry[..10];
307            // Status: byte 17 ('n' = in-use, 'f' = free)
308            let status = entry[17];
309
310            if status == b'n' {
311                let offset_str =
312                    std::str::from_utf8(offset_bytes).map_err(|_| PdfReadError::MalformedXref)?;
313                let offset: usize = offset_str
314                    .parse()
315                    .map_err(|_| PdfReadError::MalformedXref)?;
316                let obj_num = first_obj + i as u32;
317                if obj_num > 0 {
318                    map.insert(obj_num, offset);
319                }
320            }
321        }
322
323        cursor = &entries_start[count * entry_size..];
324    }
325
326    Ok(map)
327}
328
329/// Extract the `/Root` object number from the trailer dictionary.
330fn parse_trailer_root(data: &[u8], xref_offset: usize) -> Result<u32, PdfReadError> {
331    // Find "trailer" after the xref table
332    let section = &data[xref_offset..];
333    let pos = section
334        .windows(7)
335        .position(|w| w == b"trailer")
336        .ok_or(PdfReadError::MalformedTrailer)?;
337
338    let after_trailer = skip_ascii_whitespace(&section[pos + 7..]);
339
340    // Parse the trailer dictionary to find /Root
341    let dict = parse_dict_bytes(after_trailer).ok_or(PdfReadError::MalformedTrailer)?;
342
343    let root_ref = dict.get("Root").ok_or(PdfReadError::MalformedTrailer)?;
344    // Root value is a reference: "N M R" — we only need N
345    let obj_num: u32 = root_ref
346        .parse()
347        .map_err(|_| PdfReadError::MalformedTrailer)?;
348    Ok(obj_num)
349}
350
351/// Follow the catalog → pages chain to read the `/Count` value.
352fn resolve_page_count(
353    data: &[u8],
354    xref: &HashMap<u32, usize>,
355    catalog_obj_num: u32,
356) -> Result<usize, PdfReadError> {
357    // Resolve catalog object → get /Pages reference
358    let catalog_dict = resolve_dict(data, xref, catalog_obj_num)?;
359
360    let pages_ref = catalog_dict
361        .get("Pages")
362        .ok_or(PdfReadError::MalformedPageTree)?;
363    let pages_obj_num: u32 = pages_ref
364        .parse()
365        .map_err(|_| PdfReadError::MalformedPageTree)?;
366
367    // Resolve pages object → read /Count
368    let pages_dict = resolve_dict(data, xref, pages_obj_num)?;
369
370    let count_str = pages_dict
371        .get("Count")
372        .ok_or(PdfReadError::MalformedPageTree)?;
373    let count: usize = count_str
374        .parse()
375        .map_err(|_| PdfReadError::MalformedPageTree)?;
376
377    Ok(count)
378}
379
380/// Resolve an indirect object by number, parse its body as a dictionary,
381/// and return a flat `name → first-token-of-value` map.
382fn resolve_dict(
383    data: &[u8],
384    xref: &HashMap<u32, usize>,
385    obj_num: u32,
386) -> Result<HashMap<String, String>, PdfReadError> {
387    let offset = xref
388        .get(&obj_num)
389        .copied()
390        .ok_or(PdfReadError::UnresolvableObject(obj_num))?;
391
392    if offset >= data.len() {
393        return Err(PdfReadError::UnresolvableObject(obj_num));
394    }
395
396    let slice = &data[offset..];
397
398    // Skip "N G obj" header
399    let after_header = skip_obj_header(slice).ok_or(PdfReadError::UnresolvableObject(obj_num))?;
400    let after_ws = skip_ascii_whitespace(after_header);
401
402    parse_dict_bytes(after_ws).ok_or(PdfReadError::UnresolvableObject(obj_num))
403}
404
405// ── Page tree helpers ──────────────────────────────────────────────────────────
406
407#[allow(dead_code)]
408/// Recursively walk a page tree node, appending leaf `/Page` object numbers to `result`.
409fn walk_page_tree(
410    data: &[u8],
411    xref: &HashMap<u32, usize>,
412    node: u32,
413    result: &mut Vec<u32>,
414) -> Result<(), PdfReadError> {
415    let dict = resolve_dict(data, xref, node)?;
416    match dict.get("Type").map(String::as_str) {
417        Some("/Page") => {
418            result.push(node);
419        }
420        Some("/Pages") | None => {
421            let kids = resolve_kids(data, xref, node)?;
422            for kid in kids {
423                walk_page_tree(data, xref, kid, result)?;
424            }
425        }
426        _ => {}
427    }
428    Ok(())
429}
430
431#[allow(dead_code)]
432/// Extract the `/Kids` array of indirect object numbers from a Pages node.
433fn resolve_kids(
434    data: &[u8],
435    xref: &HashMap<u32, usize>,
436    obj_num: u32,
437) -> Result<Vec<u32>, PdfReadError> {
438    let offset = xref
439        .get(&obj_num)
440        .copied()
441        .ok_or(PdfReadError::UnresolvableObject(obj_num))?;
442
443    if offset >= data.len() {
444        return Err(PdfReadError::UnresolvableObject(obj_num));
445    }
446
447    // Bound the search to this object's bytes only (up to and including `endobj`),
448    // so we never accidentally find `/Kids` from a later object.
449    let slice = &data[offset..];
450    let obj_end = slice
451        .windows(6)
452        .position(|w| w == b"endobj")
453        .map(|p| p + 6)
454        .unwrap_or(slice.len());
455    let slice = &slice[..obj_end];
456
457    let after_header = skip_obj_header(slice).ok_or(PdfReadError::UnresolvableObject(obj_num))?;
458    let after_ws = skip_ascii_whitespace(after_header);
459
460    // Locate `/Kids` within the object bytes and parse the following array.
461    let needle = b"/Kids";
462    let kids_pos = after_ws
463        .windows(needle.len())
464        .position(|w| w == needle)
465        .ok_or(PdfReadError::MalformedPageTree)?;
466
467    let after_kids = skip_ascii_whitespace(&after_ws[kids_pos + needle.len()..]);
468    if !after_kids.starts_with(b"[") {
469        return Err(PdfReadError::MalformedPageTree);
470    }
471
472    parse_ref_array(after_kids).ok_or(PdfReadError::MalformedPageTree)
473}
474
475#[allow(dead_code)]
476/// Parse `[N G R N G R ...]` and return the object numbers.
477fn parse_ref_array(data: &[u8]) -> Option<Vec<u32>> {
478    debug_assert!(data.starts_with(b"["));
479    let end = data.iter().position(|&b| b == b']')?;
480    let inner = &data[1..end];
481
482    let mut result = Vec::new();
483    let mut cursor = inner;
484
485    loop {
486        cursor = skip_ascii_whitespace(cursor);
487        if cursor.is_empty() {
488            break;
489        }
490
491        let (n_str, rest) = next_token(cursor)?;
492        let Ok(n) = n_str.parse::<u32>() else { break };
493
494        let rest = skip_ascii_whitespace(rest);
495        let (_g_str, rest) = next_token(rest)?; // generation number
496
497        let rest = skip_ascii_whitespace(rest);
498        let (r_str, rest) = next_token(rest)?;
499        if r_str != "R" {
500            break;
501        }
502
503        result.push(n);
504        cursor = rest;
505    }
506
507    Some(result)
508}
509
/// Scan `data` for all indirect-reference tokens (`N G R`) and return the object numbers.
///
/// Tokenizes on ASCII whitespace and common PDF delimiters, then looks for triplets
/// where the first two tokens are decimal integers and the third is `R`. This approach
/// may produce false positives for binary streams; for the PDFs this library generates
/// that is not an issue because content streams use operators, not object references.
#[allow(dead_code)]
fn extract_indirect_refs(data: &[u8]) -> HashSet<u32> {
    // Anything that separates tokens: whitespace plus the bracket delimiters.
    let is_delim = |b: u8| b.is_ascii_whitespace() || matches!(b, b'<' | b'>' | b'[' | b']');

    // `split` yields empty slices between adjacent delimiters; drop them so
    // every token is non-empty.
    let tokens: Vec<&[u8]> = data
        .split(|&b| is_delim(b))
        .filter(|t| !t.is_empty())
        .collect();

    let all_digits = |t: &[u8]| !t.is_empty() && t.iter().all(u8::is_ascii_digit);

    // Every sliding window [N, G, "R"] with numeric N and G is a reference.
    // A matched triplet can never overlap another match (its trailing "R" is
    // not a digit), so windows(3) finds exactly the same set as a skip-by-3 scan.
    let mut refs = HashSet::new();
    for window in tokens.windows(3) {
        if window[2] == b"R" && all_digits(window[0]) && all_digits(window[1]) {
            if let Some(n) = std::str::from_utf8(window[0])
                .ok()
                .and_then(|s| s.parse::<u32>().ok())
            {
                refs.insert(n);
            }
        }
    }

    refs
}
565
566// ── Token / byte utilities ─────────────────────────────────────────────────────
567
/// Parse `<<...>>` dictionary bytes into a flat `key → first-token-of-value` map.
///
/// Values that are indirect references (`N G R`) are stored as just the object
/// number string. Nested dictionaries and arrays are skipped.
///
/// NOTE(review): hex strings (`<...>`) are not handled — a value like `<AB>`
/// would be read as a one-byte "<" token. Presumably acceptable for the PDFs
/// this library targets; confirm against the writer's output.
fn parse_dict_bytes(data: &[u8]) -> Option<HashMap<String, String>> {
    let data = skip_ascii_whitespace(data);
    if !data.starts_with(b"<<") {
        return None;
    }

    let mut map = HashMap::new();
    let mut cursor = &data[2..];

    // Invariant: at the top of each iteration `cursor` points at the next key
    // (or at the closing ">>").
    loop {
        cursor = skip_ascii_whitespace(cursor);

        if cursor.starts_with(b">>") {
            break;
        }

        // Expect a name key: /KeyName
        if !cursor.starts_with(b"/") {
            // Skip unknown token
            let (_, rest) = next_token(cursor)?;
            cursor = rest;
            continue;
        }

        let (key, after_key) = next_token(&cursor[1..])?;
        cursor = skip_ascii_whitespace(after_key);

        // Read the value — we only need the first token (object number for refs)
        if cursor.starts_with(b"<<") {
            // Nested dict: skip to matching >>
            cursor = skip_nested_dict(cursor)?;
        } else if cursor.starts_with(b"[") {
            // Array: skip to ]
            cursor = skip_array(cursor)?;
        } else if cursor.starts_with(b"(") {
            // Literal string: skip to closing )
            cursor = skip_literal_string(cursor)?;
        } else {
            let (val, rest) = next_token(cursor)?;
            cursor = skip_ascii_whitespace(rest);

            // If it looks like an indirect reference (val=N, next="G R"), capture just N
            if let Some((gen_str, after_gen)) = next_token(cursor) {
                let after_gen_ws = skip_ascii_whitespace(after_gen);
                if let Some((r_str, after_r)) = next_token(after_gen_ws) {
                    if r_str == "R"
                        && val.chars().all(|c| c.is_ascii_digit())
                        && gen_str.chars().all(|c| c.is_ascii_digit())
                    {
                        // Reference confirmed: consume all three tokens.
                        map.insert(key.to_string(), val.to_string());
                        cursor = after_r;
                        continue;
                    }
                }
                // Not a reference: store the raw value token.
                // `cursor` deliberately stays just past `val` — the peeked
                // gen/R tokens were NOT consumed and will be re-read as the
                // next key (or skipped) on the following iteration.
                map.insert(key.to_string(), val.to_string());
            } else {
                // `val` was the last token before ">>"/end of input.
                map.insert(key.to_string(), val.to_string());
            }
        }
    }

    Some(map)
}
636
/// Skip over a `<<...>>` block (with nested dicts), returning bytes after `>>`.
///
/// Depth tracking: each `<<` increments depth, each `>>` decrements. We return
/// after the `>>` that brings depth back to zero (i.e. the outer closing `>>`).
fn skip_nested_dict(data: &[u8]) -> Option<&[u8]> {
    debug_assert!(data.starts_with(b"<<"));
    let mut depth = 0i32;
    let mut pos = 0;
    // Inspect two bytes at a time; a lone trailing byte cannot close the dict.
    while let Some(pair) = data.get(pos..pos + 2) {
        match pair {
            b"<<" => {
                depth += 1;
                pos += 2;
            }
            b">>" => {
                depth -= 1;
                pos += 2;
                if depth == 0 {
                    return Some(&data[pos..]);
                }
            }
            _ => pos += 1,
        }
    }
    None
}
661
/// Skip over a `[...]` array, returning bytes after the MATCHING `]`.
///
/// Tracks bracket depth so nested arrays (e.g. `[[0 0] [612 792]]`) are
/// skipped as a whole; the previous first-`]` scan stopped mid-array.
/// Flat arrays behave exactly as before. Returns `None` if unterminated.
/// (Brackets inside literal strings are still not recognized — pre-existing
/// limitation shared with the rest of this parser.)
fn skip_array(data: &[u8]) -> Option<&[u8]> {
    debug_assert!(data.starts_with(b"["));
    let mut depth = 0i32;
    for (i, &byte) in data.iter().enumerate() {
        match byte {
            b'[' => depth += 1,
            b']' => {
                depth -= 1;
                if depth <= 0 {
                    return Some(&data[i + 1..]);
                }
            }
            _ => {}
        }
    }
    None
}
668
/// Skip over a `(...)` literal string (handles backslash escapes), returning bytes after `)`.
fn skip_literal_string(data: &[u8]) -> Option<&[u8]> {
    debug_assert!(data.starts_with(b"("));
    // The opening '(' is already counted; unescaped parens adjust the nesting.
    let mut nesting = 1i32;
    let mut pos = 1;
    while pos < data.len() {
        let step = match data[pos] {
            b'\\' => 2, // escape consumes the following byte too
            b'(' => {
                nesting += 1;
                1
            }
            b')' => {
                nesting -= 1;
                1
            }
            _ => 1,
        };
        pos += step;
        if nesting == 0 {
            return Some(&data[pos..]);
        }
    }
    // Unterminated string.
    None
}
693
694/// Skip "N G obj" indirect object header, returning bytes after "obj".
695fn skip_obj_header(data: &[u8]) -> Option<&[u8]> {
696    let (_, rest) = next_token(data)?; // object number
697    let rest = skip_ascii_whitespace(rest);
698    let (_, rest) = next_token(rest)?; // generation number
699    let rest = skip_ascii_whitespace(rest);
700    let (keyword, rest) = next_token(rest)?; // "obj"
701    if keyword != "obj" {
702        return None;
703    }
704    Some(rest)
705}
706
/// Return a sub-slice starting at the first non-whitespace byte.
fn skip_ascii_whitespace(data: &[u8]) -> &[u8] {
    let mut rest = data;
    // Peel leading whitespace bytes one at a time via slice patterns.
    while let [first, tail @ ..] = rest {
        if !first.is_ascii_whitespace() {
            break;
        }
        rest = tail;
    }
    rest
}
715
/// Skip to the end of the current line (past `\n` or `\r\n`).
fn skip_line(data: &[u8]) -> &[u8] {
    // No newline, or a newline as the very last byte, leaves nothing after it.
    match data.iter().position(|&b| b == b'\n') {
        Some(nl) if nl + 1 < data.len() => &data[nl + 1..],
        _ => &data[data.len()..],
    }
}
728
729/// Consume a literal byte sequence at the start of `data`, returning the remainder.
730fn consume_token<'a>(data: &'a [u8], token: &[u8]) -> Result<&'a [u8], PdfReadError> {
731    let trimmed = skip_ascii_whitespace(data);
732    if trimmed.starts_with(token) {
733        Ok(&trimmed[token.len()..])
734    } else {
735        Err(PdfReadError::MalformedXref)
736    }
737}
738
739/// Read the next whitespace-delimited token from `data`.
740/// Returns `(token_str, remaining_bytes)` or `None` if at end.
741fn next_token(data: &[u8]) -> Option<(&str, &[u8])> {
742    let data = skip_ascii_whitespace(data);
743    if data.is_empty() {
744        return None;
745    }
746    let end = data
747        .iter()
748        .position(|&b| b.is_ascii_whitespace() || b == b'<' || b == b'>')
749        .unwrap_or(data.len());
750    if end == 0 {
751        // Single delimiter character
752        let token = std::str::from_utf8(&data[..1]).ok()?;
753        return Some((token, &data[1..]));
754    }
755    let token = std::str::from_utf8(&data[..end]).ok()?;
756    Some((token, &data[end..]))
757}
758
759/// Find the first non-whitespace token in `data` and parse it as a string.
760fn skip_whitespace_to_token(data: &[u8]) -> Option<&str> {
761    let (tok, _) = next_token(data)?;
762    Some(tok)
763}
764
765// ── Unit tests (pub(crate) surface) ────────────────────────────────────────────
766
#[cfg(test)]
mod tests {
    use super::*;
    use crate::document::{DocumentOptions, PdfDocument};

    /// Build an in-memory PDF with `n` blank US-letter pages using this
    /// crate's own writer, so the reader tests round-trip real output.
    fn make_pdf(n: usize) -> Vec<u8> {
        let mut doc = PdfDocument::new(Vec::new(), DocumentOptions::default()).unwrap();
        for _ in 0..n {
            doc.begin_page(612.0, 792.0);
            doc.end_page().unwrap();
        }
        doc.end_document().unwrap()
    }

    // ── page_object_numbers ─────────────────────────────────────────────────────

    #[test]
    fn page_object_numbers_count_matches_page_count() {
        // Includes the zero-page edge case.
        for n in [0, 1, 3, 10] {
            let bytes = make_pdf(n);
            let reader = PdfReader::from_bytes(bytes).unwrap();
            let nums = reader.page_object_numbers().unwrap();
            assert_eq!(nums.len(), n, "expected {n} page objects");
        }
    }

    #[test]
    fn page_object_numbers_are_unique_and_positive() {
        let bytes = make_pdf(5);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let nums = reader.page_object_numbers().unwrap();
        assert!(
            nums.iter().all(|&n| n > 0),
            "all object numbers must be > 0"
        );
        let unique: HashSet<_> = nums.iter().collect();
        assert_eq!(unique.len(), nums.len(), "object numbers must be unique");
    }

    #[test]
    fn page_object_numbers_order_is_stable() {
        // Calling twice must return the same sequence.
        let bytes = make_pdf(4);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let first = reader.page_object_numbers().unwrap();
        let second = reader.page_object_numbers().unwrap();
        assert_eq!(first, second);
    }

    // ── collect_closure ─────────────────────────────────────────────────────────

    #[test]
    fn collect_closure_contains_seed_objects() {
        let bytes = make_pdf(2);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let page_nums = reader.page_object_numbers().unwrap();
        let closure = reader.collect_closure(&page_nums).unwrap();
        for &n in &page_nums {
            assert!(closure.contains(&n), "closure must include seed object {n}");
        }
    }

    #[test]
    fn collect_closure_includes_dependencies() {
        // A page has at least a content stream and resource dictionary, so the
        // closure should be larger than just the page object itself.
        let bytes = make_pdf(1);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let page_nums = reader.page_object_numbers().unwrap();
        let closure = reader.collect_closure(&page_nums).unwrap();
        assert!(
            closure.len() > page_nums.len(),
            "closure must include objects beyond the page nodes"
        );
    }

    #[test]
    fn collect_closure_empty_roots_returns_empty() {
        let bytes = make_pdf(1);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let closure = reader.collect_closure(&[]).unwrap();
        assert!(closure.is_empty());
    }

    // ── raw_object_bytes ────────────────────────────────────────────────────────

    #[test]
    fn raw_object_bytes_starts_with_obj_header_and_ends_with_endobj() {
        let bytes = make_pdf(1);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        let page_num = reader.page_object_numbers().unwrap()[0];
        let raw = reader.raw_object_bytes(page_num).unwrap();

        // Must contain " obj" (the indirect object header keyword).
        assert!(raw.windows(4).any(|w| w == b" obj"), "must contain ' obj'");
        // Must end with "endobj".
        assert!(raw.ends_with(b"endobj"), "must end with 'endobj'");
    }

    #[test]
    fn raw_object_bytes_error_on_missing_object() {
        let bytes = make_pdf(1);
        let reader = PdfReader::from_bytes(bytes).unwrap();
        assert!(
            reader.raw_object_bytes(99999).is_err(),
            "non-existent object must return Err"
        );
    }

    // ── Parser regression tests ─────────────────────────────────────────────────

    // Regression test for the skip_nested_dict depth-tracking bug:
    // previously checked `depth == 0` before decrementing, which caused the
    // outer dict's closing `>>` to be consumed when a nested dict was present.
    #[test]
    fn parse_dict_bytes_handles_nested_resources_dict() {
        // Simulate a page dict with an inline Resources dict like our writer produces.
        let dict = b"<< /Type /Page /Resources << /Font << >> >> >>";
        let map = parse_dict_bytes(dict).expect("should parse successfully");
        assert_eq!(
            map.get("Type").map(String::as_str),
            Some("/Page"),
            "Type must be /Page, not overwritten by the inner Pages object"
        );
    }

    #[test]
    fn skip_nested_dict_returns_after_matching_close() {
        // The slice after `<<...>>` should be exactly b" tail".
        let data = b"<< /K << >> >> tail";
        let rest = skip_nested_dict(data).expect("should find closing >>");
        assert_eq!(rest, b" tail");
    }
}