oxidize_pdf/parser/
lexer.rs

1//! PDF Lexer
2//!
3//! Tokenizes PDF syntax according to ISO 32000-1 Section 7.2
4
5use super::{ParseError, ParseOptions, ParseResult, ParseWarning};
6use std::io::{Read, Seek, SeekFrom};
7
/// PDF Token types
///
/// One lexical unit of PDF syntax as defined by ISO 32000-1 Section 7.2,
/// produced by `Lexer::next_token`.
#[derive(Debug, Clone, PartialEq)]
pub enum Token {
    /// Boolean: true or false
    Boolean(bool),

    /// Integer number
    Integer(i64),

    /// Real number
    Real(f64),

    /// String (literal or hexadecimal), stored as raw bytes
    String(Vec<u8>),

    /// Name object (e.g., /Type), stored without the leading slash
    Name(String),

    /// Left square bracket [
    ArrayStart,

    /// Right square bracket ]
    ArrayEnd,

    /// Dictionary start <<
    DictStart,

    /// Dictionary end >>
    DictEnd,

    /// Stream keyword
    Stream,

    /// Endstream keyword
    EndStream,

    /// Obj keyword
    Obj,

    /// Endobj keyword
    EndObj,

    /// StartXRef keyword
    StartXRef,

    /// Reference (e.g., 1 0 R): (object number, generation number)
    /// NOTE(review): no code in this file constructs this variant — the
    /// lexer leaves "num num R" for the parser (see `read_number`); confirm
    /// who emits it.
    Reference(u32, u16),

    /// Null object
    Null,

    /// Comment (usually ignored)
    Comment(String),

    /// End of file
    Eof,
}
65
/// PDF Lexer for tokenizing PDF content
pub struct Lexer<R> {
    // Buffered source of raw PDF bytes
    reader: std::io::BufReader<R>,
    #[allow(dead_code)]
    buffer: Vec<u8>,
    // Logical byte offset (bytes consumed), used in error/warning positions
    position: usize,
    // One-byte lookahead: read from `reader` but not yet consumed
    peek_buffer: Option<u8>,
    // LIFO stack of tokens pushed back via `push_token`
    token_buffer: Vec<Token>,
    // Behavior flags (lenient syntax/encoding, warning collection)
    options: ParseOptions,
    // Warnings accumulated while lexing, when enabled in `options`
    warnings: Vec<ParseWarning>,
}
77
78impl<R: Read> Lexer<R> {
    /// Create a new lexer from a reader with default options
    ///
    /// Equivalent to `new_with_options(reader, ParseOptions::default())`.
    pub fn new(reader: R) -> Self {
        Self::new_with_options(reader, ParseOptions::default())
    }
83
84    /// Create a new lexer from a reader with custom options
85    pub fn new_with_options(reader: R, options: ParseOptions) -> Self {
86        Self {
87            reader: std::io::BufReader::new(reader),
88            buffer: Vec::with_capacity(1024),
89            position: 0,
90            peek_buffer: None,
91            token_buffer: Vec::new(),
92            options,
93            warnings: Vec::new(),
94        }
95    }
96
    /// Get warnings collected during lexing (if enabled)
    ///
    /// Warnings are only recorded when `options.collect_warnings` is set;
    /// otherwise the returned slice stays empty.
    pub fn warnings(&self) -> &[ParseWarning] {
        &self.warnings
    }
101
    /// Get the next token
    ///
    /// Returns a pushed-back token first (see `push_token`); otherwise skips
    /// whitespace and dispatches on the next byte. Returns `Token::Eof` at
    /// end of input. Bytes classified as problematic encoding characters are
    /// routed to `handle_encoding_char_in_token_stream` (defined elsewhere
    /// in this module) instead of erroring.
    pub fn next_token(&mut self) -> ParseResult<Token> {
        // Check if we have a pushed-back token
        if let Some(token) = self.token_buffer.pop() {
            return Ok(token);
        }

        self.skip_whitespace()?;

        let ch = match self.peek_char()? {
            Some(ch) => ch,
            None => return Ok(Token::Eof),
        };

        match ch {
            b'%' => self.read_comment(),
            b'/' => self.read_name(),
            b'(' => self.read_literal_string(),
            // '<' is either a hex string or, doubled, a dictionary start
            b'<' => self.read_angle_bracket(),
            b'>' => {
                self.consume_char()?;
                if self.peek_char()? == Some(b'>') {
                    self.consume_char()?;
                    Ok(Token::DictEnd)
                } else {
                    // A lone '>' is not valid PDF syntax
                    Err(ParseError::SyntaxError {
                        position: self.position,
                        message: "Expected '>' after '>'".to_string(),
                    })
                }
            }
            b'[' => {
                self.consume_char()?;
                Ok(Token::ArrayStart)
            }
            b']' => {
                self.consume_char()?;
                Ok(Token::ArrayEnd)
            }
            // 't'/'f' may start true/false; other words fall through to
            // keyword processing inside read_boolean
            b't' | b'f' => self.read_boolean(),
            b'n' => self.read_null(),
            b'+' | b'-' | b'0'..=b'9' | b'.' => self.read_number(),
            b'R' => {
                // R could be a keyword (for references)
                self.consume_char()?;
                Ok(Token::Name("R".to_string()))
            }
            _ if ch.is_ascii_alphabetic() => self.read_keyword(),
            _ => {
                // Check if this is a problematic encoding character
                if self.is_problematic_encoding_char(ch) {
                    self.handle_encoding_char_in_token_stream(ch)
                } else {
                    Err(ParseError::SyntaxError {
                        position: self.position,
                        message: format!("Unexpected character: {}", ch as char),
                    })
                }
            }
        }
    }
163
164    /// Peek at the next character without consuming it
165    fn peek_char(&mut self) -> ParseResult<Option<u8>> {
166        if let Some(ch) = self.peek_buffer {
167            return Ok(Some(ch));
168        }
169
170        let mut buf = [0u8; 1];
171        match self.reader.read_exact(&mut buf) {
172            Ok(_) => {
173                self.peek_buffer = Some(buf[0]);
174                Ok(Some(buf[0]))
175            }
176            Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => Ok(None),
177            Err(e) => Err(e.into()),
178        }
179    }
180
181    /// Consume the next character
182    fn consume_char(&mut self) -> ParseResult<Option<u8>> {
183        let ch = self.peek_char()?;
184        if ch.is_some() {
185            self.peek_buffer = None;
186            self.position += 1;
187        }
188        Ok(ch)
189    }
190
191    /// Skip whitespace and return the number of bytes skipped
192    pub(crate) fn skip_whitespace(&mut self) -> ParseResult<usize> {
193        let mut count = 0;
194        while let Some(ch) = self.peek_char()? {
195            if ch.is_ascii_whitespace() {
196                self.consume_char()?;
197                count += 1;
198            } else {
199                break;
200            }
201        }
202        Ok(count)
203    }
204
205    /// Read a comment (from % to end of line)
206    fn read_comment(&mut self) -> ParseResult<Token> {
207        self.consume_char()?; // consume '%'
208        let mut comment = String::new();
209
210        while let Some(ch) = self.peek_char()? {
211            if ch == b'\n' || ch == b'\r' {
212                break;
213            }
214            self.consume_char()?;
215            comment.push(ch as char);
216        }
217
218        Ok(Token::Comment(comment))
219    }
220
    /// Read a name object (e.g., /Type)
    ///
    /// Consumes the leading '/' and reads until whitespace or a delimiter
    /// byte. Two-digit hex escapes of the form `#xx` are decoded into the
    /// byte they represent (e.g. `/A#20B` becomes the name "A B").
    fn read_name(&mut self) -> ParseResult<Token> {
        self.consume_char()?; // consume '/'
        let mut name = String::new();

        while let Some(ch) = self.peek_char()? {
            // Whitespace or a delimiter ends the name.
            // NOTE(review): '{' and '}' are also delimiters per ISO 32000-1
            // Table 2 but are not treated as such here — confirm intended.
            if ch.is_ascii_whitespace()
                || matches!(ch, b'/' | b'<' | b'>' | b'[' | b']' | b'(' | b')' | b'%')
            {
                break;
            }
            self.consume_char()?;

            // Handle hex codes in names (e.g., /A#20B means /A B)
            if ch == b'#' {
                // Both hex digits must be present; EOF here is a syntax error.
                let hex1 = self
                    .consume_char()?
                    .ok_or_else(|| ParseError::SyntaxError {
                        position: self.position,
                        message: "Incomplete hex code in name".to_string(),
                    })?;
                let hex2 = self
                    .consume_char()?
                    .ok_or_else(|| ParseError::SyntaxError {
                        position: self.position,
                        message: "Incomplete hex code in name".to_string(),
                    })?;

                // from_str_radix rejects non-hex digits for us.
                let value = u8::from_str_radix(&format!("{}{}", hex1 as char, hex2 as char), 16)
                    .map_err(|_| ParseError::SyntaxError {
                        position: self.position,
                        message: "Invalid hex code in name".to_string(),
                    })?;

                // Decoded byte stored as a char (Latin-1-style mapping for
                // values >= 0x80).
                name.push(value as char);
            } else {
                name.push(ch as char);
            }
        }

        Ok(Token::Name(name))
    }
263
264    /// Read a literal string (parentheses)
265    fn read_literal_string(&mut self) -> ParseResult<Token> {
266        self.consume_char()?; // consume '('
267        let mut string = Vec::new();
268        let mut paren_depth = 1;
269        let mut escape = false;
270
271        while paren_depth > 0 {
272            let ch = match self.consume_char()? {
273                Some(c) => c,
274                None => {
275                    if self.options.lenient_syntax {
276                        // In lenient mode, return what we have so far
277                        if self.options.collect_warnings {
278                            self.warnings.push(ParseWarning::SyntaxErrorRecovered {
279                                position: self.position,
280                                expected: "closing parenthesis".to_string(),
281                                found: "EOF".to_string(),
282                                recovery_action: "returned partial string content".to_string(),
283                            });
284                        }
285                        break;
286                    } else {
287                        return Err(ParseError::SyntaxError {
288                            position: self.position,
289                            message: "Unterminated string".to_string(),
290                        });
291                    }
292                }
293            };
294
295            if escape {
296                let escaped = match ch {
297                    b'n' => b'\n',
298                    b'r' => b'\r',
299                    b't' => b'\t',
300                    b'b' => b'\x08',
301                    b'f' => b'\x0C',
302                    b'(' => b'(',
303                    b')' => b')',
304                    b'\\' => b'\\',
305                    b'0'..=b'7' => {
306                        // Octal escape sequence
307                        let mut value = ch - b'0';
308                        for _ in 0..2 {
309                            if let Some(next) = self.peek_char()? {
310                                if matches!(next, b'0'..=b'7') {
311                                    self.consume_char()?;
312                                    value = value * 8 + (next - b'0');
313                                } else {
314                                    break;
315                                }
316                            }
317                        }
318                        value
319                    }
320                    _ => ch, // Unknown escape, use literal
321                };
322                string.push(escaped);
323                escape = false;
324            } else {
325                match ch {
326                    b'\\' => escape = true,
327                    b'(' => {
328                        string.push(ch);
329                        paren_depth += 1;
330                    }
331                    b')' => {
332                        paren_depth -= 1;
333                        if paren_depth > 0 {
334                            string.push(ch);
335                        }
336                    }
337                    _ => string.push(ch),
338                }
339            }
340        }
341
342        // Apply character encoding recovery if enabled
343        let processed_string = if self.options.lenient_encoding {
344            self.process_string_with_encoding_recovery(&string)?
345        } else {
346            string
347        };
348
349        Ok(Token::String(processed_string))
350    }
351
    /// Read angle bracket tokens (hex strings or dict markers)
    ///
    /// A second '<' produces `DictStart`; otherwise the bytes up to '>' are
    /// read as a hex string. Whitespace inside the string is ignored and an
    /// odd digit count is padded with a trailing '0' before decoding
    /// (ISO 32000-1 §7.3.4.3). In lenient-syntax mode, invalid characters
    /// and a missing terminator are recovered from (optionally recording
    /// warnings) instead of failing.
    fn read_angle_bracket(&mut self) -> ParseResult<Token> {
        self.consume_char()?; // consume '<'

        if self.peek_char()? == Some(b'<') {
            self.consume_char()?;
            Ok(Token::DictStart)
        } else {
            // Hex string
            let mut hex_chars = String::new();
            let mut found_end = false;

            while let Some(ch) = self.peek_char()? {
                if ch == b'>' {
                    self.consume_char()?;
                    found_end = true;
                    break;
                }
                self.consume_char()?;
                if ch.is_ascii_hexdigit() {
                    hex_chars.push(ch as char);
                } else if !ch.is_ascii_whitespace() {
                    if self.options.lenient_syntax {
                        // In lenient mode, skip invalid characters
                        if self.options.collect_warnings {
                            self.warnings.push(ParseWarning::SyntaxErrorRecovered {
                                position: self.position,
                                expected: "hex digit".to_string(),
                                found: format!("'{}'", ch as char),
                                recovery_action: "skipped invalid character".to_string(),
                            });
                        }
                    } else {
                        return Err(ParseError::SyntaxError {
                            position: self.position,
                            message: "Invalid character in hex string".to_string(),
                        });
                    }
                }
            }

            if !found_end {
                if self.options.lenient_syntax {
                    // In lenient mode, return what we have so far
                    if self.options.collect_warnings {
                        self.warnings.push(ParseWarning::SyntaxErrorRecovered {
                            position: self.position,
                            expected: ">".to_string(),
                            found: "EOF".to_string(),
                            recovery_action: "returned partial hex string".to_string(),
                        });
                    }
                } else {
                    return Err(ParseError::SyntaxError {
                        position: self.position,
                        message: "Unterminated hex string".to_string(),
                    });
                }
            }

            // Pad with 0 if odd number of digits
            if hex_chars.len() % 2 != 0 {
                hex_chars.push('0');
            }

            // Convert hex to bytes
            // (Only ASCII hex digits reach this point, so the UTF-8 and
            // radix conversions below should not fail in practice.)
            let mut bytes = Vec::new();
            for chunk in hex_chars.as_bytes().chunks(2) {
                let hex_str = std::str::from_utf8(chunk).map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: "Invalid UTF-8 in hex string".to_string(),
                })?;
                let byte =
                    u8::from_str_radix(hex_str, 16).map_err(|_| ParseError::SyntaxError {
                        position: self.position,
                        message: "Invalid hex string".to_string(),
                    })?;
                bytes.push(byte);
            }

            Ok(Token::String(bytes))
        }
    }
435
436    /// Read boolean (true/false)
437    fn read_boolean(&mut self) -> ParseResult<Token> {
438        let word = self.read_word()?;
439        match word.as_str() {
440            "true" => Ok(Token::Boolean(true)),
441            "false" => Ok(Token::Boolean(false)),
442            _ => {
443                // Not a boolean, might be a keyword
444                self.process_keyword(word)
445            }
446        }
447    }
448
449    /// Read null
450    fn read_null(&mut self) -> ParseResult<Token> {
451        let word = self.read_word()?;
452        if word == "null" {
453            Ok(Token::Null)
454        } else {
455            // Not null, might be a keyword
456            self.process_keyword(word)
457        }
458    }
459
    /// Read a number (integer or real)
    ///
    /// Accepts an optional leading sign, digits, at most one decimal point,
    /// and (as an extension beyond ISO 32000-1) scientific notation, which
    /// always yields a `Real`. Reference syntax ("n g R") is deliberately
    /// not assembled here; the parser combines the resulting tokens.
    fn read_number(&mut self) -> ParseResult<Token> {
        let mut number_str = String::new();
        let mut has_dot = false;

        // Handle sign - consume it first
        if let Some(ch) = self.peek_char()? {
            if ch == b'+' || ch == b'-' {
                self.consume_char()?;
                number_str.push(ch as char);

                // After sign, we must have at least one digit
                // (a bare sign at EOF still fails below at parse time)
                if let Some(next) = self.peek_char()? {
                    if !next.is_ascii_digit() && next != b'.' {
                        return Err(ParseError::SyntaxError {
                            position: self.position,
                            message: "Expected digit after sign".to_string(),
                        });
                    }
                }
            }
        }

        // Read digits and decimal point
        while let Some(ch) = self.peek_char()? {
            match ch {
                b'0'..=b'9' => {
                    self.consume_char()?;
                    number_str.push(ch as char);
                }
                // Only the first '.' is part of the number
                b'.' if !has_dot => {
                    self.consume_char()?;
                    number_str.push(ch as char);
                    has_dot = true;
                }
                _ => break,
            }
        }

        // Handle scientific notation (e/E)
        if let Some(ch) = self.peek_char()? {
            if ch == b'e' || ch == b'E' {
                self.consume_char()?;
                number_str.push(ch as char);

                // Check for optional sign after e/E
                if let Some(sign_ch) = self.peek_char()? {
                    if sign_ch == b'+' || sign_ch == b'-' {
                        self.consume_char()?;
                        number_str.push(sign_ch as char);
                    }
                }

                // Read exponent digits
                while let Some(digit_ch) = self.peek_char()? {
                    if digit_ch.is_ascii_digit() {
                        self.consume_char()?;
                        number_str.push(digit_ch as char);
                    } else {
                        break;
                    }
                }

                // Scientific notation always results in a real number
                has_dot = true;
            }
        }

        // Don't try to parse references here - let the parser handle it
        // References are just "num num R" and can be handled at a higher level

        // Parse as number
        if has_dot {
            let value = number_str
                .parse::<f64>()
                .map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: format!("Invalid real number: '{number_str}'"),
                })?;
            Ok(Token::Real(value))
        } else {
            // NOTE(review): digit strings beyond i64 range error out here;
            // a lenient fallback to Real might be preferable — confirm.
            let value = number_str
                .parse::<i64>()
                .map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: format!("Invalid integer: '{number_str}'"),
                })?;
            Ok(Token::Integer(value))
        }
    }
550
551    /// Read a keyword
552    fn read_keyword(&mut self) -> ParseResult<Token> {
553        let word = self.read_word()?;
554        self.process_keyword(word)
555    }
556
557    /// Process a word as a keyword
558    fn process_keyword(&self, word: String) -> ParseResult<Token> {
559        match word.as_str() {
560            "stream" => Ok(Token::Stream),
561            "endstream" => Ok(Token::EndStream),
562            "obj" => Ok(Token::Obj),
563            "endobj" => Ok(Token::EndObj),
564            "startxref" => Ok(Token::StartXRef),
565            _ => Err(ParseError::SyntaxError {
566                position: self.position,
567                message: format!("Unknown keyword: {word}"),
568            }),
569        }
570    }
571
572    /// Read a word (sequence of non-delimiter characters)
573    fn read_word(&mut self) -> ParseResult<String> {
574        let mut word = String::new();
575
576        while let Some(ch) = self.peek_char()? {
577            if ch.is_ascii_whitespace()
578                || matches!(ch, b'/' | b'<' | b'>' | b'[' | b']' | b'(' | b')' | b'%')
579            {
580                break;
581            }
582            self.consume_char()?;
583            word.push(ch as char);
584        }
585
586        Ok(word)
587    }
588
589    /// Read a sequence of digits
590    #[allow(dead_code)]
591    fn read_digits(&mut self) -> ParseResult<String> {
592        let mut digits = String::new();
593
594        while let Some(ch) = self.peek_char()? {
595            if ch.is_ascii_digit() {
596                self.consume_char()?;
597                digits.push(ch as char);
598            } else {
599                break;
600            }
601        }
602
603        Ok(digits)
604    }
605
606    /// Read a newline sequence (CR, LF, or CRLF)
607    pub fn read_newline(&mut self) -> ParseResult<()> {
608        match self.peek_char()? {
609            Some(b'\r') => {
610                self.consume_char()?;
611                // Check for CRLF
612                if self.peek_char()? == Some(b'\n') {
613                    self.consume_char()?;
614                }
615                Ok(())
616            }
617            Some(b'\n') => {
618                self.consume_char()?;
619                Ok(())
620            }
621            _ => Err(ParseError::SyntaxError {
622                position: self.position,
623                message: "Expected newline".to_string(),
624            }),
625        }
626    }
627
    /// Peek at the next byte without consuming it
    ///
    /// Errors with `UnexpectedToken` at end of input.
    /// (A stray "Read exactly n bytes" doc line that belonged to
    /// `read_bytes` was removed from here.)
    pub fn peek_byte(&mut self) -> ParseResult<u8> {
        match self.peek_char()? {
            Some(b) => Ok(b),
            None => Err(ParseError::UnexpectedToken {
                expected: "byte".to_string(),
                found: "EOF".to_string(),
            }),
        }
    }
639
640    /// Read a single byte
641    pub fn read_byte(&mut self) -> ParseResult<u8> {
642        match self.consume_char()? {
643            Some(b) => Ok(b),
644            None => Err(ParseError::UnexpectedToken {
645                expected: "byte".to_string(),
646                found: "EOF".to_string(),
647            }),
648        }
649    }
650
651    /// Seek to a specific position
652    pub fn seek(&mut self, pos: u64) -> ParseResult<()>
653    where
654        R: Seek,
655    {
656        self.reader.seek(SeekFrom::Start(pos))?;
657        self.position = pos as usize;
658        Ok(())
659    }
660
661    pub fn read_bytes(&mut self, n: usize) -> ParseResult<Vec<u8>> {
662        let mut bytes = Vec::with_capacity(n);
663
664        // First consume any peeked byte to avoid duplication
665        if self.peek_buffer.is_some() && n > 0 {
666            bytes.push(self.consume_char()?.unwrap());
667        }
668
669        // Read remaining bytes directly
670        let remaining = n - bytes.len();
671        if remaining > 0 {
672            let mut rest = vec![0u8; remaining];
673            self.reader.read_exact(&mut rest)?;
674            self.position += remaining;
675            bytes.extend_from_slice(&rest);
676        }
677
678        Ok(bytes)
679    }
680
681    /// Read until a specific byte sequence is found
682    pub fn read_until_sequence(&mut self, sequence: &[u8]) -> ParseResult<Vec<u8>> {
683        let mut result = Vec::new();
684        let mut match_pos = 0;
685
686        while let Some(ch) = self.consume_char()? {
687            result.push(ch);
688
689            if ch == sequence[match_pos] {
690                match_pos += 1;
691                if match_pos == sequence.len() {
692                    // Found the sequence, remove it from result
693                    result.truncate(result.len() - sequence.len());
694                    break;
695                }
696            } else if ch == sequence[0] {
697                match_pos = 1;
698            } else {
699                match_pos = 0;
700            }
701        }
702
703        if match_pos < sequence.len() {
704            return Err(ParseError::SyntaxError {
705                position: self.position,
706                message: format!("Sequence {sequence:?} not found"),
707            });
708        }
709
710        Ok(result)
711    }
712
    /// Get current position
    ///
    /// This is the lexer's logical byte offset (bytes consumed through
    /// `consume_char`/`read_bytes`), used for error reporting.
    pub fn position(&self) -> usize {
        self.position
    }
717
    /// Push back a token to be returned by the next call to next_token
    ///
    /// Multiple pushed tokens are returned in LIFO order.
    pub fn push_token(&mut self, token: Token) {
        self.token_buffer.push(token);
    }
722
723    /// Expect a specific keyword token
724    pub fn expect_keyword(&mut self, keyword: &str) -> ParseResult<()> {
725        let token = self.next_token()?;
726        match (keyword, &token) {
727            ("endstream", Token::EndStream) => Ok(()),
728            ("stream", Token::Stream) => Ok(()),
729            ("endobj", Token::EndObj) => Ok(()),
730            ("obj", Token::Obj) => Ok(()),
731            ("startxref", Token::StartXRef) => Ok(()),
732            _ => Err(ParseError::UnexpectedToken {
733                expected: format!("keyword '{keyword}'"),
734                found: format!("{token:?}"),
735            }),
736        }
737    }
738
739    /// Find a keyword ahead in the stream without consuming bytes
740    /// Returns the number of bytes until the keyword is found
741    pub fn find_keyword_ahead(
742        &mut self,
743        keyword: &str,
744        max_bytes: usize,
745    ) -> ParseResult<Option<usize>>
746    where
747        R: Seek,
748    {
749        use std::io::{Read, Seek, SeekFrom};
750
751        // Save current position
752        let current_pos = self.reader.stream_position()?;
753        let start_buffer_state = self.peek_buffer;
754
755        let keyword_bytes = keyword.as_bytes();
756        let mut bytes_read = 0;
757        let mut match_buffer = Vec::new();
758
759        // Search for the keyword
760        while bytes_read < max_bytes {
761            let mut byte = [0u8; 1];
762            match self.reader.read_exact(&mut byte) {
763                Ok(_) => {
764                    bytes_read += 1;
765                    match_buffer.push(byte[0]);
766
767                    // Keep only the last keyword.len() bytes in match_buffer
768                    if match_buffer.len() > keyword_bytes.len() {
769                        match_buffer.remove(0);
770                    }
771
772                    // Check if we found the keyword
773                    if match_buffer.len() == keyword_bytes.len() && match_buffer == keyword_bytes {
774                        // Restore position
775                        self.reader.seek(SeekFrom::Start(current_pos))?;
776                        self.peek_buffer = start_buffer_state;
777                        return Ok(Some(bytes_read - keyword_bytes.len()));
778                    }
779                }
780                Err(_) => break, // EOF or error
781            }
782        }
783
784        // Restore position
785        self.reader.seek(SeekFrom::Start(current_pos))?;
786        self.peek_buffer = start_buffer_state;
787        Ok(None)
788    }
789
790    /// Peek ahead n bytes without consuming them
791    pub fn peek_ahead(&mut self, n: usize) -> ParseResult<Vec<u8>>
792    where
793        R: Seek,
794    {
795        use std::io::{Read, Seek, SeekFrom};
796
797        // Save current position
798        let current_pos = self.reader.stream_position()?;
799        let start_buffer_state = self.peek_buffer;
800
801        // Read n bytes
802        let mut bytes = vec![0u8; n];
803        let bytes_read = self.reader.read(&mut bytes)?;
804        bytes.truncate(bytes_read);
805
806        // Restore position
807        self.reader.seek(SeekFrom::Start(current_pos))?;
808        self.peek_buffer = start_buffer_state;
809
810        Ok(bytes)
811    }
812
813    /// Save the current position for later restoration
814    pub fn save_position(&mut self) -> ParseResult<(u64, Option<u8>)>
815    where
816        R: Seek,
817    {
818        use std::io::Seek;
819        let pos = self.reader.stream_position()?;
820        Ok((pos, self.peek_buffer))
821    }
822
823    /// Restore a previously saved position
824    pub fn restore_position(&mut self, saved: (u64, Option<u8>)) -> ParseResult<()>
825    where
826        R: Seek,
827    {
828        use std::io::{Seek, SeekFrom};
829        self.reader.seek(SeekFrom::Start(saved.0))?;
830        self.peek_buffer = saved.1;
831        self.position = saved.0 as usize;
832        Ok(())
833    }
834
835    /// Peek the next token without consuming it
836    pub fn peek_token(&mut self) -> ParseResult<Token>
837    where
838        R: Seek,
839    {
840        let saved_pos = self.save_position()?;
841        let token = self.next_token()?;
842        self.restore_position(saved_pos)?;
843        Ok(token)
844    }
845
846    /// Process string bytes with enhanced character encoding recovery
847    fn process_string_with_encoding_recovery(
848        &mut self,
849        string_bytes: &[u8],
850    ) -> ParseResult<Vec<u8>> {
851        use super::encoding::{CharacterDecoder, EncodingOptions, EncodingType, EnhancedDecoder};
852
853        // First check for common problematic bytes that need special handling
854        let has_problematic_chars = string_bytes.iter().any(|&b| {
855            // Control characters and Latin-1 supplement range that often cause issues
856            (0x80..=0x9F).contains(&b)
857                || b == 0x07
858                || (b <= 0x1F && b != 0x09 && b != 0x0A && b != 0x0D)
859        });
860
861        let decoder = EnhancedDecoder::new();
862
863        // Use more aggressive encoding options if problematic characters detected
864        let encoding_options = if has_problematic_chars {
865            EncodingOptions {
866                lenient_mode: true, // Always use lenient mode for problematic chars
867                preferred_encoding: Some(EncodingType::Windows1252), // Try Windows-1252 first for control chars
868                max_replacements: std::cmp::max(100, string_bytes.len() / 10), // More generous replacement limit
869                log_issues: self.options.collect_warnings,
870            }
871        } else {
872            EncodingOptions {
873                lenient_mode: self.options.lenient_encoding,
874                preferred_encoding: self.options.preferred_encoding,
875                max_replacements: 50,
876                log_issues: self.options.collect_warnings,
877            }
878        };
879
880        match decoder.decode(string_bytes, &encoding_options) {
881            Ok(result) => {
882                // Log warning if replacements were made or problematic chars detected
883                if (result.replacement_count > 0 || has_problematic_chars)
884                    && self.options.collect_warnings
885                {
886                    self.warnings.push(ParseWarning::InvalidEncoding {
887                        position: self.position,
888                        recovered_text: if result.text.len() > 50 {
889                            // Safe character boundary truncation
890                            let truncate_at = result
891                                .text
892                                .char_indices()
893                                .map(|(i, _)| i)
894                                .nth(47)
895                                .unwrap_or(result.text.len().min(47));
896                            format!(
897                                "{}... (truncated, {} chars total)",
898                                &result.text[..truncate_at],
899                                result.text.chars().count()
900                            )
901                        } else {
902                            result.text.clone()
903                        },
904                        encoding_used: result.detected_encoding,
905                        replacement_count: result.replacement_count,
906                    });
907                }
908
909                // Convert back to bytes
910                Ok(result.text.into_bytes())
911            }
912            Err(encoding_error) => {
913                if self.options.lenient_encoding {
914                    // Enhanced fallback strategy
915                    let fallback_result = self.apply_fallback_encoding_strategy(string_bytes);
916
917                    if self.options.collect_warnings {
918                        self.warnings.push(ParseWarning::InvalidEncoding {
919                            position: self.position,
920                            recovered_text: format!(
921                                "Fallback strategy applied: {} -> {} chars",
922                                string_bytes.len(),
923                                fallback_result.len()
924                            ),
925                            encoding_used: None,
926                            replacement_count: string_bytes.len(),
927                        });
928                    }
929                    Ok(fallback_result)
930                } else {
931                    Err(ParseError::CharacterEncodingError {
932                        position: self.position,
933                        message: format!(
934                            "Failed to decode string with any supported encoding: {encoding_error}"
935                        ),
936                    })
937                }
938            }
939        }
940    }
941
942    /// Apply fallback encoding strategy for severely corrupted strings
943    fn apply_fallback_encoding_strategy(&self, string_bytes: &[u8]) -> Vec<u8> {
944        let mut result = Vec::with_capacity(string_bytes.len());
945
946        for &byte in string_bytes {
947            match byte {
948                // Replace common problematic control characters with safe alternatives
949                0x00..=0x08 | 0x0B | 0x0C | 0x0E..=0x1F => {
950                    result.push(b' '); // Replace control chars with space
951                }
952                0x80..=0x9F => {
953                    // Windows-1252 control character range - try to map to reasonable alternatives
954                    let replacement = match byte {
955                        0x80 => b'E',  // Euro sign -> E
956                        0x81 => b' ',  // Undefined -> space
957                        0x82 => b',',  // Single low-9 quotation mark -> comma
958                        0x83 => b'f',  // Latin small letter f with hook -> f
959                        0x84 => b'"',  // Double low-9 quotation mark -> quote
960                        0x85 => b'.',  // Horizontal ellipsis -> period
961                        0x86 => b'+',  // Dagger -> plus
962                        0x87 => b'+',  // Double dagger -> plus
963                        0x88 => b'^',  // Modifier letter circumflex accent -> caret
964                        0x89 => b'%',  // Per mille sign -> percent
965                        0x8A => b'S',  // Latin capital letter S with caron -> S
966                        0x8B => b'<',  // Single left-pointing angle quotation mark
967                        0x8C => b'O',  // Latin capital ligature OE -> O
968                        0x8D => b' ',  // Undefined -> space
969                        0x8E => b'Z',  // Latin capital letter Z with caron -> Z
970                        0x8F => b' ',  // Undefined -> space
971                        0x90 => b' ',  // Undefined -> space
972                        0x91 => b'\'', // Left single quotation mark
973                        0x92 => b'\'', // Right single quotation mark
974                        0x93 => b'"',  // Left double quotation mark
975                        0x94 => b'"',  // Right double quotation mark
976                        0x95 => b'*',  // Bullet -> asterisk
977                        0x96 => b'-',  // En dash -> hyphen
978                        0x97 => b'-',  // Em dash -> hyphen
979                        0x98 => b'~',  // Small tilde
980                        0x99 => b'T',  // Trade mark sign -> T
981                        0x9A => b's',  // Latin small letter s with caron -> s
982                        0x9B => b'>',  // Single right-pointing angle quotation mark
983                        0x9C => b'o',  // Latin small ligature oe -> o
984                        0x9D => b' ',  // Undefined -> space
985                        0x9E => b'z',  // Latin small letter z with caron -> z
986                        0x9F => b'Y',  // Latin capital letter Y with diaeresis -> Y
987                        _ => b'?',     // Fallback
988                    };
989                    result.push(replacement);
990                }
991                _ => {
992                    result.push(byte); // Keep valid bytes as-is
993                }
994            }
995        }
996
997        result
998    }
999
1000    /// Check if a character is likely a problematic encoding character
1001    fn is_problematic_encoding_char(&self, ch: u8) -> bool {
1002        // Control characters and Latin-1 supplement range that often indicate encoding issues
1003        (0x80..=0x9F).contains(&ch) ||
1004        ch == 0x07 || // Bell character
1005        (ch <= 0x1F && ch != 0x09 && ch != 0x0A && ch != 0x0D) || // Control chars except tab, LF, CR
1006        // In lenient mode, also handle extended Latin-1 characters that may appear in corrupted streams
1007        (self.options.lenient_syntax && ch >= 0xA0) // Extended Latin-1 range (u8 max is 0xFF)
1008    }
1009
    /// Handle problematic encoding characters in the main token stream.
    ///
    /// In lenient mode the offending byte is consumed, a warning is recorded
    /// (when warning collection is enabled), and lexing continues by
    /// recursing into `next_token`. In strict mode a
    /// `CharacterEncodingError` describing the byte is returned instead.
    fn handle_encoding_char_in_token_stream(&mut self, ch: u8) -> ParseResult<Token> {
        if self.options.lenient_encoding {
            // Consume the problematic character and continue
            self.consume_char()?;

            // Log warning about the character recovery
            if self.options.collect_warnings {
                // Classify the byte for the warning text; 0x07 is matched
                // before the 0x00..=0x1F range so it reports as "bell".
                let replacement_char = match ch {
                    0x07 => "bell",
                    0x00..=0x1F => "control",
                    0x80..=0x9F => "latin1-supplement",
                    _ => "unknown",
                };

                self.warnings.push(ParseWarning::InvalidEncoding {
                    position: self.position,
                    recovered_text: format!(
                        "Skipped problematic {replacement_char} character (0x{ch:02X})"
                    ),
                    encoding_used: None,
                    replacement_count: 1,
                });
            }

            // Skip this character and try to get the next token
            self.skip_whitespace()?;
            if let Ok(Some(_)) = self.peek_char() {
                self.next_token() // Recursively try next token
            } else {
                // NOTE(review): hitting EOF right after a problematic
                // character yields a SyntaxError rather than Token::Eof even
                // in lenient mode — confirm this is intended.
                Err(ParseError::SyntaxError {
                    position: self.position,
                    message: "Unexpected end of file after problematic character".to_string(),
                })
            }
        } else {
            // In strict mode, generate a more descriptive error
            let char_description = match ch {
                0x07 => "Bell character (\\u{07})".to_string(),
                0x00..=0x1F => format!("Control character (\\u{{{ch:02X}}})"),
                0x80..=0x9F => format!("Latin-1 supplement character (\\u{{{ch:02X}}})"),
                _ => format!("Problematic character (\\u{{{ch:02X}}})"),
            };

            Err(ParseError::CharacterEncodingError {
                position: self.position,
                message: format!(
                    "Unexpected character: {char_description} - Consider using lenient parsing mode"
                ),
            })
        }
    }
1062}
1063
1064#[cfg(test)]
1065mod tests {
1066    use super::*;
1067    use std::io::Cursor;
1068
    #[test]
    fn test_lexer_basic_tokens() {
        // One token of each primitive kind in a single input stream.
        // Test positive and negative numbers
        let input = b"123 -456 3.14 true false null /Name";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(-456));
        assert_eq!(lexer.next_token().unwrap(), Token::Real(3.14));
        assert_eq!(lexer.next_token().unwrap(), Token::Boolean(true));
        assert_eq!(lexer.next_token().unwrap(), Token::Boolean(false));
        assert_eq!(lexer.next_token().unwrap(), Token::Null);
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Name".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::Eof);
    }

    #[test]
    fn test_lexer_negative_numbers() {
        // Leading '-' binds to the following integer or real token.
        // Test negative numbers without space
        let input = b"-123 -45.67";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(-123));
        assert_eq!(lexer.next_token().unwrap(), Token::Real(-45.67));
    }

    #[test]
    fn test_lexer_strings() {
        // Literal "(...)" and hex "<...>" strings decode to the same bytes.
        let input = b"(Hello World) <48656C6C6F>";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(
            lexer.next_token().unwrap(),
            Token::String(b"Hello World".to_vec())
        );
        assert_eq!(
            lexer.next_token().unwrap(),
            Token::String(b"Hello".to_vec())
        );
    }

    #[test]
    fn test_lexer_dictionaries() {
        // "<<"/">>" delimiters and /Name keys in a flat dictionary.
        let input = b"<< /Type /Page >>";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
    }
1120
1121    #[test]
1122    fn test_lexer_arrays() {
1123        let input = b"[1 2 3]";
1124        let mut lexer = Lexer::new(Cursor::new(input));
1125
1126        assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1127        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
1128        assert_eq!(lexer.next_token().unwrap(), Token::Integer(2));
1129        assert_eq!(lexer.next_token().unwrap(), Token::Integer(3));
1130        assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1131    }
1132
    #[test]
    fn test_lexer_references() {
        // Indirect references are NOT fused into Token::Reference here: the
        // lexer emits the two integers and an "R" name; a higher layer
        // combines them.
        let input = b"1 0 R 25 1 R";
        let mut lexer = Lexer::new(Cursor::new(input));

        // Now references are parsed as separate tokens
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
        // 'R' should be parsed as a keyword or name
        match lexer.next_token().unwrap() {
            Token::Name(s) if s == "R" => {} // Could be a name
            other => panic!("Expected R token, got {other:?}"),
        }

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(25));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
        match lexer.next_token().unwrap() {
            Token::Name(s) if s == "R" => {} // Could be a name
            other => panic!("Expected R token, got {other:?}"),
        }
    }

    #[test]
    fn test_lexer_comments() {
        // A '%' comment is emitted as a token (without the '%' or newline)
        // followed by the next real token.
        let input = b"%PDF-1.7\n123";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(
            lexer.next_token().unwrap(),
            Token::Comment("PDF-1.7".to_string())
        );
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
    }
1166
1167    // Comprehensive tests for Lexer
1168    mod comprehensive_tests {
1169        use super::*;
1170        use std::io::Cursor;
1171
        #[test]
        fn test_token_debug_trait() {
            // Token's derived Debug output names the variant and payload.
            let token = Token::Integer(42);
            let debug_str = format!("{token:?}");
            assert!(debug_str.contains("Integer"));
            assert!(debug_str.contains("42"));
        }

        #[test]
        fn test_token_clone() {
            // Cloned tokens compare equal to the original.
            let token = Token::String(b"hello".to_vec());
            let cloned = token.clone();
            assert_eq!(token, cloned);
        }

        #[test]
        fn test_token_equality() {
            // PartialEq distinguishes both variant and payload.
            assert_eq!(Token::Integer(42), Token::Integer(42));
            assert_ne!(Token::Integer(42), Token::Integer(43));
            assert_eq!(Token::Boolean(true), Token::Boolean(true));
            assert_ne!(Token::Boolean(true), Token::Boolean(false));
            assert_eq!(Token::Null, Token::Null);
            assert_ne!(Token::Null, Token::Integer(0));
        }

        #[test]
        fn test_lexer_empty_input() {
            // Empty input immediately yields Eof.
            let input = b"";
            let mut lexer = Lexer::new(Cursor::new(input));
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }

        #[test]
        fn test_lexer_whitespace_only() {
            // Whitespace-only input yields Eof without any tokens.
            let input = b"   \t\n\r  ";
            let mut lexer = Lexer::new(Cursor::new(input));
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }

        #[test]
        fn test_lexer_integer_edge_cases() {
            // Zero, explicit '+' sign, negative zero and a >32-bit value.
            let input = b"0 +123 -0 9876543210";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(9876543210));
        }

        #[test]
        fn test_lexer_real_edge_cases() {
            // Reals with leading/trailing dots and explicit signs.
            let input = b"0.0 +3.14 -2.71828 .5 5. 123.456789";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Real(0.0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(3.14));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(-2.71828));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(0.5));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(5.0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(123.456789));
        }
1234
        #[test]
        fn test_lexer_scientific_notation() {
            // 'e'/'E' exponents with optional signs parse as Real.
            let input = b"1.23e10 -4.56E-5 1e0 2E+3";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Real(1.23e10));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(-4.56e-5));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(1e0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(2e3));
        }

        #[test]
        fn test_lexer_string_literal_escapes() {
            // Backslash escapes (\n, \t, \", \\) inside literal strings are
            // decoded to the corresponding raw bytes.
            let input = b"(Hello\\nWorld) (Tab\\tChar) (Quote\\\"Mark) (Backslash\\\\)";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello\nWorld".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Tab\tChar".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Quote\"Mark".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Backslash\\".to_vec())
            );
        }

        #[test]
        fn test_lexer_string_literal_nested_parens() {
            // Balanced parentheses inside a literal string do not end it.
            let input = b"(Nested (parentheses) work)";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Nested (parentheses) work".to_vec())
            );
        }

        #[test]
        fn test_lexer_string_literal_empty() {
            // "()" is a valid, empty literal string.
            let input = b"()";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::String(b"".to_vec()));
        }

        #[test]
        fn test_lexer_hexadecimal_strings() {
            // Even-length hex strings decode pairwise; "<>" is empty.
            let input = b"<48656C6C6F> <20576F726C64> <>";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b" World".to_vec())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::String(b"".to_vec()));
        }

        #[test]
        fn test_lexer_hexadecimal_strings_odd_length() {
            // A trailing lone hex digit is treated as if followed by '0'.
            let input = b"<48656C6C6F2> <1> <ABC>";
            let mut lexer = Lexer::new(Cursor::new(input));

            // Odd length hex strings should pad with 0
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello ".to_vec())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::String(b"\x10".to_vec()));
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"\xAB\xC0".to_vec())
            );
        }

        #[test]
        fn test_lexer_hexadecimal_strings_whitespace() {
            // Whitespace between hex digit pairs is ignored.
            let input = b"<48 65 6C 6C 6F>";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello".to_vec())
            );
        }
1331
        #[test]
        fn test_lexer_names() {
            // Common PDF dictionary key names tokenize as Token::Name.
            let input = b"/Type /Page /Root /Kids /Count /MediaBox";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Root".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Kids".to_string()));
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("Count".to_string())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("MediaBox".to_string())
            );
        }

        #[test]
        fn test_lexer_names_with_special_chars() {
            // "#xx" escapes in names decode to the byte with that hex value.
            let input = b"/Name#20with#20spaces /Name#2Fwith#2Fslashes";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("Name with spaces".to_string())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("Name/with/slashes".to_string())
            );
        }

        #[test]
        fn test_lexer_names_edge_cases() {
            // Empty name, digits, and keyword-like names are all valid
            // Name tokens (keywords only apply outside of a '/' prefix).
            let input = b"/ /A /123 /true /false /null";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Name("".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("A".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("123".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("true".to_string()));
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("false".to_string())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::Name("null".to_string()));
        }
1381
1382        #[test]
1383        fn test_lexer_nested_dictionaries() {
1384            let input = b"<< /Type /Page /Resources << /Font << /F1 123 0 R >> >> >>";
1385            let mut lexer = Lexer::new(Cursor::new(input));
1386
1387            assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1388            assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
1389            assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
1390            assert_eq!(
1391                lexer.next_token().unwrap(),
1392                Token::Name("Resources".to_string())
1393            );
1394            assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1395            assert_eq!(lexer.next_token().unwrap(), Token::Name("Font".to_string()));
1396            assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1397            assert_eq!(lexer.next_token().unwrap(), Token::Name("F1".to_string()));
1398            assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
1399            assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
1400            assert_eq!(lexer.next_token().unwrap(), Token::Name("R".to_string()));
1401            assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1402            assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1403            assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1404        }
1405
1406        #[test]
1407        fn test_lexer_nested_arrays() {
1408            let input = b"[[1 2] [3 4] [5 [6 7]]]";
1409            let mut lexer = Lexer::new(Cursor::new(input));
1410
1411            assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1412            assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1413            assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
1414            assert_eq!(lexer.next_token().unwrap(), Token::Integer(2));
1415            assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1416            assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1417            assert_eq!(lexer.next_token().unwrap(), Token::Integer(3));
1418            assert_eq!(lexer.next_token().unwrap(), Token::Integer(4));
1419            assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1420            assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1421            assert_eq!(lexer.next_token().unwrap(), Token::Integer(5));
1422            assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1423            assert_eq!(lexer.next_token().unwrap(), Token::Integer(6));
1424            assert_eq!(lexer.next_token().unwrap(), Token::Integer(7));
1425            assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1426            assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1427            assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1428        }
1429
        #[test]
        fn test_lexer_mixed_content() {
            // Smoke test: a realistic page dictionary tokenizes end-to-end
            // without errors.
            let input = b"<< /Type /Page /MediaBox [0 0 612 792] /Resources << /Font << /F1 << /Type /Font /Subtype /Type1 >> >> >> >>";
            let mut lexer = Lexer::new(Cursor::new(input));

            // Just test that we can parse this without errors
            let mut tokens = Vec::new();
            loop {
                match lexer.next_token().unwrap() {
                    Token::Eof => break,
                    token => tokens.push(token),
                }
            }
            assert!(tokens.len() > 10);
        }

        #[test]
        fn test_lexer_keywords() {
            // Bare keywords map to their dedicated token variants.
            let input = b"obj endobj stream endstream startxref";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Obj);
            assert_eq!(lexer.next_token().unwrap(), Token::EndObj);
            assert_eq!(lexer.next_token().unwrap(), Token::Stream);
            assert_eq!(lexer.next_token().unwrap(), Token::EndStream);
            assert_eq!(lexer.next_token().unwrap(), Token::StartXRef);
        }

        #[test]
        fn test_lexer_multiple_comments() {
            // Consecutive comments each produce their own Comment token.
            let input = b"%First comment\n%Second comment\n123";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Comment("First comment".to_string())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Comment("Second comment".to_string())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
        }

        #[test]
        fn test_lexer_comment_without_newline() {
            // A comment terminated by EOF (no newline) is still emitted.
            let input = b"%Comment at end";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Comment("Comment at end".to_string())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }

        #[test]
        fn test_lexer_special_characters_in_streams() {
            // Tokenizing stops at the 'stream' keyword; raw stream bytes are
            // the responsibility of a higher-level parser.
            let input = b"<< /Length 5 >> stream\nHello endstream";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("Length".to_string())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(5));
            assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
            assert_eq!(lexer.next_token().unwrap(), Token::Stream);
            // The actual stream content would be handled by a higher-level parser
        }
1501
        #[test]
        fn test_lexer_push_token() {
            // A pushed-back token is returned by the next next_token() call.
            let input = b"123 456";
            let mut lexer = Lexer::new(Cursor::new(input));

            let token1 = lexer.next_token().unwrap();
            assert_eq!(token1, Token::Integer(123));

            let token2 = lexer.next_token().unwrap();
            assert_eq!(token2, Token::Integer(456));

            // Push token2 back
            lexer.push_token(token2.clone());

            // Should get token2 again
            let token3 = lexer.next_token().unwrap();
            assert_eq!(token3, token2);

            // Should get EOF
            let token4 = lexer.next_token().unwrap();
            assert_eq!(token4, Token::Eof);
        }

        #[test]
        fn test_lexer_push_multiple_tokens() {
            // The push-back buffer is LIFO: tokens come back in reverse
            // order of pushing.
            let input = b"123";
            let mut lexer = Lexer::new(Cursor::new(input));

            let original_token = lexer.next_token().unwrap();
            assert_eq!(original_token, Token::Integer(123));

            // Push multiple tokens
            lexer.push_token(Token::Boolean(true));
            lexer.push_token(Token::Boolean(false));
            lexer.push_token(Token::Null);

            // Should get them back in reverse order (stack behavior)
            assert_eq!(lexer.next_token().unwrap(), Token::Null);
            assert_eq!(lexer.next_token().unwrap(), Token::Boolean(false));
            assert_eq!(lexer.next_token().unwrap(), Token::Boolean(true));
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }

        #[test]
        fn test_lexer_read_newline() {
            // read_newline consumes both "\n" and "\r\n" line endings.
            let input = b"123\n456\r\n789";
            let mut lexer = Lexer::new(Cursor::new(input));

            // Read first digits
            let digits1 = lexer.read_digits().unwrap();
            assert_eq!(digits1, "123");
            assert!(lexer.read_newline().is_ok());

            // Read second digits
            let digits2 = lexer.read_digits().unwrap();
            assert_eq!(digits2, "456");
            assert!(lexer.read_newline().is_ok());

            // Read final digits
            let digits3 = lexer.read_digits().unwrap();
            assert_eq!(digits3, "789");
        }
1564
        #[test]
        fn test_lexer_read_bytes() {
            // read_bytes returns exactly the requested number of raw bytes.
            let input = b"Hello World";
            let mut lexer = Lexer::new(Cursor::new(input));

            let bytes = lexer.read_bytes(5).unwrap();
            assert_eq!(bytes, b"Hello");

            let bytes = lexer.read_bytes(6).unwrap();
            assert_eq!(bytes, b" World");
        }

        #[test]
        fn test_lexer_read_until_sequence() {
            // Returns everything before the delimiter and consumes the
            // delimiter itself.
            let input = b"Hello endstream World";
            let mut lexer = Lexer::new(Cursor::new(input));

            let result = lexer.read_until_sequence(b"endstream").unwrap();
            assert_eq!(result, b"Hello ");

            // Continue reading after the sequence
            let rest = lexer.read_digits().unwrap();
            assert_eq!(rest, ""); // read_digits only reads digits, " World" has no digits
        }

        #[test]
        fn test_lexer_read_until_sequence_not_found() {
            // A missing delimiter is an error, not an empty result.
            let input = b"Hello World";
            let mut lexer = Lexer::new(Cursor::new(input));

            let result = lexer.read_until_sequence(b"notfound");
            assert!(result.is_err());
        }

        #[test]
        fn test_lexer_position_tracking() {
            // position() advances monotonically as tokens are consumed.
            let input = b"123 456";
            let mut lexer = Lexer::new(Cursor::new(input));

            let initial_pos = lexer.position();
            assert_eq!(initial_pos, 0);

            lexer.next_token().unwrap(); // "123"
            let pos_after_first = lexer.position();
            assert!(pos_after_first > initial_pos);

            lexer.next_token().unwrap(); // "456"
            let pos_after_second = lexer.position();
            assert!(pos_after_second > pos_after_first);
        }
1615
1616        #[test]
1617        fn test_lexer_large_numbers() {
1618            let input = b"2147483647 -2147483648 9223372036854775807 -9223372036854775808";
1619            let mut lexer = Lexer::new(Cursor::new(input));
1620
1621            assert_eq!(lexer.next_token().unwrap(), Token::Integer(2147483647));
1622            assert_eq!(lexer.next_token().unwrap(), Token::Integer(-2147483648));
1623            assert_eq!(
1624                lexer.next_token().unwrap(),
1625                Token::Integer(9223372036854775807)
1626            );
1627            assert_eq!(
1628                lexer.next_token().unwrap(),
1629                Token::Integer(-9223372036854775808)
1630            );
1631        }
1632
1633        #[test]
1634        fn test_lexer_very_long_string() {
1635            let long_str = "A".repeat(1000);
1636            let input = format!("({long_str})");
1637            let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1638
1639            if let Token::String(s) = lexer.next_token().unwrap() {
1640                assert_eq!(s.len(), 1000);
1641                assert_eq!(s, long_str.as_bytes());
1642            } else {
1643                panic!("Expected string token");
1644            }
1645        }
1646
1647        #[test]
1648        fn test_lexer_very_long_name() {
1649            let long_name = "A".repeat(500);
1650            let input = format!("/{long_name}");
1651            let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1652
1653            if let Token::Name(name) = lexer.next_token().unwrap() {
1654                assert_eq!(name.len(), 500);
1655                assert_eq!(name, long_name);
1656            } else {
1657                panic!("Expected name token");
1658            }
1659        }
1660
1661        #[test]
1662        fn test_lexer_error_handling_invalid_hex() {
1663            let input = b"<48656C6C6FG>";
1664            let mut lexer = Lexer::new(Cursor::new(input));
1665
1666            // Should handle invalid hex gracefully
1667            let result = lexer.next_token();
1668            assert!(result.is_ok() || result.is_err()); // Either works or fails gracefully
1669        }
1670
1671        #[test]
1672        fn test_lexer_all_token_types() {
1673            let input = b"true false null 123 -456 3.14 (string) <48656C6C6F> /Name [ ] << >> obj endobj stream endstream startxref % comment\n";
1674            let mut lexer = Lexer::new(Cursor::new(input));
1675
1676            let mut token_types = Vec::new();
1677            loop {
1678                match lexer.next_token().unwrap() {
1679                    Token::Eof => break,
1680                    token => token_types.push(std::mem::discriminant(&token)),
1681                }
1682            }
1683
1684            // Should have multiple different token types
1685            assert!(token_types.len() > 10);
1686        }
1687
1688        #[test]
1689        fn test_lexer_performance() {
1690            let input = "123 456 789 ".repeat(1000);
1691            let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1692
1693            let start_time = std::time::Instant::now();
1694            let mut count = 0;
1695            loop {
1696                match lexer.next_token().unwrap() {
1697                    Token::Eof => break,
1698                    _ => count += 1,
1699                }
1700            }
1701            let elapsed = start_time.elapsed();
1702
1703            assert_eq!(count, 3000); // 1000 repetitions * 3 tokens each
1704            assert!(elapsed.as_millis() < 1000); // Should complete within 1 second
1705        }
1706    }
1707
1708    #[test]
1709    fn test_lexer_find_keyword_ahead() {
1710        let input = b"some data here endstream more data";
1711        let mut lexer = Lexer::new(Cursor::new(input));
1712
1713        // Find endstream keyword
1714        let result = lexer.find_keyword_ahead("endstream", 100);
1715        assert!(result.is_ok());
1716        assert_eq!(result.unwrap(), Some(15)); // Position of endstream
1717
1718        // Try to find non-existent keyword
1719        let result2 = lexer.find_keyword_ahead("notfound", 100);
1720        assert!(result2.is_ok());
1721        assert_eq!(result2.unwrap(), None);
1722
1723        // Test with limited search distance
1724        let result3 = lexer.find_keyword_ahead("endstream", 10);
1725        assert!(result3.is_ok());
1726        assert_eq!(result3.unwrap(), None); // Not found within 10 bytes
1727    }
1728
1729    #[test]
1730    fn test_lexer_peek_token() {
1731        let input = b"123 456 /Name";
1732        let mut lexer = Lexer::new(Cursor::new(input));
1733
1734        // Peek first token
1735        let peeked = lexer.peek_token();
1736        assert!(peeked.is_ok());
1737        assert_eq!(peeked.unwrap(), Token::Integer(123));
1738
1739        // Verify peek doesn't consume
1740        let next = lexer.next_token();
1741        assert!(next.is_ok());
1742        assert_eq!(next.unwrap(), Token::Integer(123));
1743
1744        // Peek and consume next tokens
1745        assert_eq!(lexer.peek_token().unwrap(), Token::Integer(456));
1746        assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1747
1748        assert_eq!(lexer.peek_token().unwrap(), Token::Name("Name".to_string()));
1749        assert_eq!(lexer.next_token().unwrap(), Token::Name("Name".to_string()));
1750    }
1751
1752    #[test]
1753    fn test_lexer_expect_keyword() {
1754        let input = b"endstream obj endobj";
1755        let mut lexer = Lexer::new(Cursor::new(input));
1756
1757        // Expect correct keyword
1758        assert!(lexer.expect_keyword("endstream").is_ok());
1759
1760        // Expect another correct keyword
1761        assert!(lexer.expect_keyword("obj").is_ok());
1762
1763        // Expect wrong keyword (should fail)
1764        let result = lexer.expect_keyword("stream");
1765        assert!(result.is_err());
1766        match result {
1767            Err(ParseError::UnexpectedToken { expected, found }) => {
1768                assert!(expected.contains("stream"));
1769                assert!(found.contains("EndObj"));
1770            }
1771            _ => panic!("Expected UnexpectedToken error"),
1772        }
1773    }
1774
1775    #[test]
1776    fn test_lexer_save_restore_position() {
1777        let input = b"123 456 789";
1778        let mut lexer = Lexer::new(Cursor::new(input));
1779
1780        // Read first token
1781        assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
1782
1783        // Save position
1784        let saved = lexer.save_position();
1785        assert!(saved.is_ok());
1786        let saved_pos = saved.unwrap();
1787
1788        // Read more tokens
1789        assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1790        assert_eq!(lexer.next_token().unwrap(), Token::Integer(789));
1791
1792        // Restore position
1793        assert!(lexer.restore_position(saved_pos).is_ok());
1794
1795        // Should be back at second token
1796        assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1797    }
1798
1799    #[test]
1800    fn test_lexer_character_encoding_recovery() {
1801        // Test string with encoding issues (Windows-1252 bytes)
1802        let input = b"(Caf\x80 \x91Hello\x92)"; // "Café 'Hello'"
1803        let options = ParseOptions::lenient();
1804        let mut lexer = Lexer::new_with_options(Cursor::new(input), options);
1805
1806        match lexer.next_token().unwrap() {
1807            Token::String(bytes) => {
1808                // Should contain the text, potentially with encoding recovery
1809                let text = String::from_utf8_lossy(&bytes);
1810                println!("Recovered text: {text}");
1811                assert!(!text.is_empty()); // Should not be empty
1812            }
1813            other => panic!("Expected String token, got {other:?}"),
1814        }
1815
1816        // Check that warnings were collected
1817        let warnings = lexer.warnings();
1818        if !warnings.is_empty() {
1819            println!("Encoding warnings: {warnings:?}");
1820        }
1821    }
1822}