1use super::{ParseError, ParseOptions, ParseResult, ParseWarning};
6use std::io::{Read, Seek, SeekFrom};
7
/// Lexical tokens produced while scanning PDF object syntax.
#[derive(Debug, Clone, PartialEq)]
pub enum Token {
 /// `true` or `false`.
 Boolean(bool),

 /// Integer literal.
 Integer(i64),

 /// Real (floating-point) literal.
 Real(f64),

 /// Literal `( )` or hex `< >` string, as raw bytes.
 String(Vec<u8>),

 /// Name object (`/Name`), with `#xx` hex escapes already decoded.
 Name(String),

 /// `[`
 ArrayStart,

 /// `]`
 ArrayEnd,

 /// `<<`
 DictStart,

 /// `>>`
 DictEnd,

 /// `stream` keyword.
 Stream,

 /// `endstream` keyword.
 EndStream,

 /// `obj` keyword.
 Obj,

 /// `endobj` keyword.
 EndObj,

 /// `startxref` keyword.
 StartXRef,

 /// Indirect reference (object number, generation number).
 /// NOTE(review): not emitted by this lexer directly — `R` is lexed as
 /// `Name("R")`; presumably the parser assembles references. Confirm.
 Reference(u32, u16),

 /// `null` keyword.
 Null,

 /// `%` comment, without the marker or trailing EOL.
 Comment(String),

 /// End of input (returned instead of an error).
 Eof,
}
65
/// Streaming tokenizer for PDF syntax over any `Read` source.
pub struct Lexer<R> {
 /// Buffered reader over the raw input.
 reader: std::io::BufReader<R>,
 /// Scratch buffer; currently unused.
 #[allow(dead_code)]
 buffer: Vec<u8>,
 /// Logical count of bytes consumed so far (used in error positions).
 /// NOTE(review): can drift from the reader offset after raw-reader scans.
 position: usize,
 /// Single-byte lookahead cache filled by `peek_char`.
 peek_buffer: Option<u8>,
 /// Pushed-back tokens, served in LIFO order by `next_token`.
 token_buffer: Vec<Token>,
 /// Parsing configuration (lenient modes, warning collection).
 options: ParseOptions,
 /// Warnings accumulated while recovering in lenient modes.
 warnings: Vec<ParseWarning>,
}
77
78impl<R: Read> Lexer<R> {
    /// Creates a lexer with default `ParseOptions`.
    pub fn new(reader: R) -> Self {
        Self::new_with_options(reader, ParseOptions::default())
    }
83
    /// Creates a lexer with explicit parsing options.
    pub fn new_with_options(reader: R, options: ParseOptions) -> Self {
        Self {
            reader: std::io::BufReader::new(reader),
            buffer: Vec::with_capacity(1024),
            position: 0,
            peek_buffer: None,
            token_buffer: Vec::new(),
            options,
            warnings: Vec::new(),
        }
    }
96
    /// Warnings collected so far during lenient-mode recovery.
    pub fn warnings(&self) -> &[ParseWarning] {
        &self.warnings
    }
101
    /// Returns the next token from the input.
    ///
    /// Pushed-back tokens (see `push_token`) are served first, in LIFO order.
    /// End of input yields `Token::Eof` rather than an error. In lenient
    /// modes, unexpected bytes are skipped and the lexer retries (note:
    /// via recursion, so pathological input can deepen the stack).
    pub fn next_token(&mut self) -> ParseResult<Token> {
        // Serve any pushed-back token before touching the reader.
        if let Some(token) = self.token_buffer.pop() {
            return Ok(token);
        }

        self.skip_whitespace()?;

        let ch = match self.peek_char()? {
            Some(ch) => ch,
            None => return Ok(Token::Eof),
        };

        // Dispatch on the first byte of the upcoming token.
        match ch {
            b'%' => self.read_comment(),
            b'/' => self.read_name(),
            b'(' => self.read_literal_string(),
            // '<' begins either '<<' (dict) or a hex string.
            b'<' => self.read_angle_bracket(),
            b'>' => {
                // Only valid as the '>>' dictionary terminator.
                self.consume_char()?;
                if self.peek_char()? == Some(b'>') {
                    self.consume_char()?;
                    Ok(Token::DictEnd)
                } else {
                    Err(ParseError::SyntaxError {
                        position: self.position,
                        message: "Expected '>' after '>'".to_string(),
                    })
                }
            }
            b'[' => {
                self.consume_char()?;
                Ok(Token::ArrayStart)
            }
            b']' => {
                self.consume_char()?;
                Ok(Token::ArrayEnd)
            }
            // 'true'/'false'/'null' are tried first; other words fall back
            // to keyword handling inside the helpers.
            b't' | b'f' => self.read_boolean(),
            b'n' => self.read_null(),
            b'+' | b'-' | b'0'..=b'9' | b'.' => self.read_number(),
            b'R' => {
                // Emitted as Name("R"); presumably the parser combines
                // "<obj> <gen> R" into Token::Reference — TODO confirm.
                self.consume_char()?;
                Ok(Token::Name("R".to_string()))
            }
            _ if ch.is_ascii_alphabetic() => self.read_keyword(),
            b';' => {
                // Stray semicolons are silently skipped.
                self.consume_char()?;
                self.next_token()
            }
            _ => {
                if self.is_problematic_encoding_char(ch) {
                    // Control / C1-range bytes get dedicated recovery.
                    self.handle_encoding_char_in_token_stream(ch)
                } else if self.options.lenient_syntax {
                    // Lenient: log, skip the byte, and retry.
                    if self.options.collect_warnings {
                        tracing::debug!(
                            "Warning: Skipping unexpected character '{}' at position {}",
                            ch as char,
                            self.position
                        );
                    }
                    self.consume_char()?;
                    self.next_token()
                } else {
                    Err(ParseError::SyntaxError {
                        position: self.position,
                        message: format!("Unexpected character: {}", ch as char),
                    })
                }
            }
        }
    }
179
180 fn peek_char(&mut self) -> ParseResult<Option<u8>> {
182 if let Some(ch) = self.peek_buffer {
183 return Ok(Some(ch));
184 }
185
186 let mut buf = [0u8; 1];
187 match self.reader.read_exact(&mut buf) {
188 Ok(_) => {
189 self.peek_buffer = Some(buf[0]);
190 Ok(Some(buf[0]))
191 }
192 Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => Ok(None),
193 Err(e) => Err(e.into()),
194 }
195 }
196
197 fn consume_char(&mut self) -> ParseResult<Option<u8>> {
199 let ch = self.peek_char()?;
200 if ch.is_some() {
201 self.peek_buffer = None;
202 self.position += 1;
203 }
204 Ok(ch)
205 }
206
207 pub(crate) fn skip_whitespace(&mut self) -> ParseResult<usize> {
209 let mut count = 0;
210 while let Some(ch) = self.peek_char()? {
211 if ch.is_ascii_whitespace() {
212 self.consume_char()?;
213 count += 1;
214 } else {
215 break;
216 }
217 }
218 Ok(count)
219 }
220
221 fn read_comment(&mut self) -> ParseResult<Token> {
223 self.consume_char()?; let mut comment = String::new();
225
226 while let Some(ch) = self.peek_char()? {
227 if ch == b'\n' || ch == b'\r' {
228 break;
229 }
230 self.consume_char()?;
231 comment.push(ch as char);
232 }
233
234 Ok(Token::Comment(comment))
235 }
236
237 fn read_name(&mut self) -> ParseResult<Token> {
239 self.consume_char()?; let mut name = String::new();
241
242 while let Some(ch) = self.peek_char()? {
243 if ch.is_ascii_whitespace()
244 || matches!(ch, b'/' | b'<' | b'>' | b'[' | b']' | b'(' | b')' | b'%')
245 {
246 break;
247 }
248 self.consume_char()?;
249
250 if ch == b'#' {
252 let hex1 = self
253 .consume_char()?
254 .ok_or_else(|| ParseError::SyntaxError {
255 position: self.position,
256 message: "Incomplete hex code in name".to_string(),
257 })?;
258 let hex2 = self
259 .consume_char()?
260 .ok_or_else(|| ParseError::SyntaxError {
261 position: self.position,
262 message: "Incomplete hex code in name".to_string(),
263 })?;
264
265 let value = u8::from_str_radix(&format!("{}{}", hex1 as char, hex2 as char), 16)
266 .map_err(|_| ParseError::SyntaxError {
267 position: self.position,
268 message: "Invalid hex code in name".to_string(),
269 })?;
270
271 name.push(value as char);
272 } else {
273 name.push(ch as char);
274 }
275 }
276
277 Ok(Token::Name(name))
278 }
279
280 fn read_literal_string(&mut self) -> ParseResult<Token> {
282 self.consume_char()?; let mut string = Vec::new();
284 let mut paren_depth = 1;
285 let mut escape = false;
286
287 while paren_depth > 0 {
288 let ch = match self.consume_char()? {
289 Some(c) => c,
290 None => {
291 if self.options.lenient_syntax {
292 if self.options.collect_warnings {
294 self.warnings.push(ParseWarning::SyntaxErrorRecovered {
295 position: self.position,
296 expected: "closing parenthesis".to_string(),
297 found: "EOF".to_string(),
298 recovery_action: "returned partial string content".to_string(),
299 });
300 }
301 break;
302 } else {
303 return Err(ParseError::SyntaxError {
304 position: self.position,
305 message: "Unterminated string".to_string(),
306 });
307 }
308 }
309 };
310
311 if escape {
312 let escaped = match ch {
313 b'n' => b'\n',
314 b'r' => b'\r',
315 b't' => b'\t',
316 b'b' => b'\x08',
317 b'f' => b'\x0C',
318 b'(' => b'(',
319 b')' => b')',
320 b'\\' => b'\\',
321 b'0'..=b'7' => {
322 let mut value = u16::from(ch - b'0');
326 for _ in 0..2 {
327 if let Some(next) = self.peek_char()? {
328 if matches!(next, b'0'..=b'7') {
329 self.consume_char()?;
330 value = value * 8 + u16::from(next - b'0');
331 } else {
332 break;
333 }
334 }
335 }
336 value as u8
337 }
338 _ => ch, };
340 string.push(escaped);
341 escape = false;
342 } else {
343 match ch {
344 b'\\' => escape = true,
345 b'(' => {
346 string.push(ch);
347 paren_depth += 1;
348 }
349 b')' => {
350 paren_depth -= 1;
351 if paren_depth > 0 {
352 string.push(ch);
353 }
354 }
355 _ => string.push(ch),
356 }
357 }
358 }
359
360 let processed_string = if self.options.lenient_encoding {
362 self.process_string_with_encoding_recovery(&string)?
363 } else {
364 string
365 };
366
367 Ok(Token::String(processed_string))
368 }
369
    /// Handles input starting with '<': either the `<<` dictionary opener
    /// or a hex string `<48656C...>`.
    ///
    /// Hex strings: whitespace between digits is ignored; an odd digit
    /// count is padded with a trailing '0'; invalid characters and a
    /// missing '>' are recoverable (with warnings) in lenient mode.
    fn read_angle_bracket(&mut self) -> ParseResult<Token> {
        self.consume_char()?;
        if self.peek_char()? == Some(b'<') {
            self.consume_char()?;
            Ok(Token::DictStart)
        } else {
            let mut hex_chars = String::new();
            let mut found_end = false;

            // Collect hex digits until the closing '>' or EOF.
            while let Some(ch) = self.peek_char()? {
                if ch == b'>' {
                    self.consume_char()?;
                    found_end = true;
                    break;
                }
                self.consume_char()?;
                if ch.is_ascii_hexdigit() {
                    hex_chars.push(ch as char);
                } else if !ch.is_ascii_whitespace() {
                    // Non-hex, non-whitespace byte inside the string.
                    if self.options.lenient_syntax {
                        if self.options.collect_warnings {
                            self.warnings.push(ParseWarning::SyntaxErrorRecovered {
                                position: self.position,
                                expected: "hex digit".to_string(),
                                found: format!("'{}'", ch as char),
                                recovery_action: "skipped invalid character".to_string(),
                            });
                        }
                    } else {
                        return Err(ParseError::SyntaxError {
                            position: self.position,
                            message: "Invalid character in hex string".to_string(),
                        });
                    }
                }
            }

            if !found_end {
                // EOF before '>': warn-and-continue or fail per options.
                if self.options.lenient_syntax {
                    if self.options.collect_warnings {
                        self.warnings.push(ParseWarning::SyntaxErrorRecovered {
                            position: self.position,
                            expected: ">".to_string(),
                            found: "EOF".to_string(),
                            recovery_action: "returned partial hex string".to_string(),
                        });
                    }
                } else {
                    return Err(ParseError::SyntaxError {
                        position: self.position,
                        message: "Unterminated hex string".to_string(),
                    });
                }
            }

            // Odd number of digits: the spec says pad with a trailing zero.
            if hex_chars.len() % 2 != 0 {
                hex_chars.push('0');
            }

            // Decode digit pairs into bytes. `hex_chars` holds only ASCII
            // hex digits at this point, so the UTF-8 check cannot fail.
            let mut bytes = Vec::new();
            for chunk in hex_chars.as_bytes().chunks(2) {
                let hex_str = std::str::from_utf8(chunk).map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: "Invalid UTF-8 in hex string".to_string(),
                })?;
                let byte =
                    u8::from_str_radix(hex_str, 16).map_err(|_| ParseError::SyntaxError {
                        position: self.position,
                        message: "Invalid hex string".to_string(),
                    })?;
                bytes.push(byte);
            }

            Ok(Token::String(bytes))
        }
    }
453
454 fn read_boolean(&mut self) -> ParseResult<Token> {
456 let word = self.read_word()?;
457 match word.as_str() {
458 "true" => Ok(Token::Boolean(true)),
459 "false" => Ok(Token::Boolean(false)),
460 _ => {
461 self.process_keyword(word)
463 }
464 }
465 }
466
467 fn read_null(&mut self) -> ParseResult<Token> {
469 let word = self.read_word()?;
470 if word == "null" {
471 Ok(Token::Null)
472 } else {
473 self.process_keyword(word)
475 }
476 }
477
    /// Reads an integer or real number.
    ///
    /// Accepts an optional leading sign, digits, at most one decimal point,
    /// and an optional exponent (`e`/`E`, optional sign, digits). Anything
    /// with a dot or exponent parses as `Real`, otherwise `Integer`.
    /// NOTE(review): exponents are not standard PDF syntax; presumably
    /// accepted here for robustness against malformed files.
    fn read_number(&mut self) -> ParseResult<Token> {
        let mut number_str = String::new();
        let mut has_dot = false;

        // Optional sign, which must be followed by a digit or '.'.
        if let Some(ch) = self.peek_char()? {
            if ch == b'+' || ch == b'-' {
                self.consume_char()?;
                number_str.push(ch as char);

                if let Some(next) = self.peek_char()? {
                    if !next.is_ascii_digit() && next != b'.' {
                        return Err(ParseError::SyntaxError {
                            position: self.position,
                            message: "Expected digit after sign".to_string(),
                        });
                    }
                }
            }
        }

        // Integer part plus at most one '.'; a second dot ends the number.
        while let Some(ch) = self.peek_char()? {
            match ch {
                b'0'..=b'9' => {
                    self.consume_char()?;
                    number_str.push(ch as char);
                }
                b'.' if !has_dot => {
                    self.consume_char()?;
                    number_str.push(ch as char);
                    has_dot = true;
                }
                _ => break,
            }
        }

        // Optional exponent: 'e'/'E', optional sign, digit run.
        if let Some(ch) = self.peek_char()? {
            if ch == b'e' || ch == b'E' {
                self.consume_char()?;
                number_str.push(ch as char);

                if let Some(sign_ch) = self.peek_char()? {
                    if sign_ch == b'+' || sign_ch == b'-' {
                        self.consume_char()?;
                        number_str.push(sign_ch as char);
                    }
                }

                while let Some(digit_ch) = self.peek_char()? {
                    if digit_ch.is_ascii_digit() {
                        self.consume_char()?;
                        number_str.push(digit_ch as char);
                    } else {
                        break;
                    }
                }

                // An exponent always forces a Real result.
                has_dot = true;
            }
        }

        if has_dot {
            let value = number_str
                .parse::<f64>()
                .map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: format!("Invalid real number: '{number_str}'"),
                })?;
            Ok(Token::Real(value))
        } else {
            let value = number_str
                .parse::<i64>()
                .map_err(|_| ParseError::SyntaxError {
                    position: self.position,
                    message: format!("Invalid integer: '{number_str}'"),
                })?;
            Ok(Token::Integer(value))
        }
    }
568
    /// Reads a bare word and maps it to a keyword token.
    fn read_keyword(&mut self) -> ParseResult<Token> {
        let word = self.read_word()?;
        self.process_keyword(word)
    }
574
    /// Maps a bare word to its keyword token; any other word is a syntax
    /// error (booleans, `null`, and `R` are handled before reaching here).
    fn process_keyword(&self, word: String) -> ParseResult<Token> {
        match word.as_str() {
            "stream" => Ok(Token::Stream),
            "endstream" => Ok(Token::EndStream),
            "obj" => Ok(Token::Obj),
            "endobj" => Ok(Token::EndObj),
            "startxref" => Ok(Token::StartXRef),
            _ => Err(ParseError::SyntaxError {
                position: self.position,
                message: format!("Unknown keyword: {word}"),
            }),
        }
    }
589
590 fn read_word(&mut self) -> ParseResult<String> {
592 let mut word = String::new();
593
594 while let Some(ch) = self.peek_char()? {
595 if ch.is_ascii_whitespace()
596 || matches!(ch, b'/' | b'<' | b'>' | b'[' | b']' | b'(' | b')' | b'%')
597 {
598 break;
599 }
600 self.consume_char()?;
601 word.push(ch as char);
602 }
603
604 Ok(word)
605 }
606
607 #[allow(dead_code)]
609 fn read_digits(&mut self) -> ParseResult<String> {
610 let mut digits = String::new();
611
612 while let Some(ch) = self.peek_char()? {
613 if ch.is_ascii_digit() {
614 self.consume_char()?;
615 digits.push(ch as char);
616 } else {
617 break;
618 }
619 }
620
621 Ok(digits)
622 }
623
624 pub fn read_newline(&mut self) -> ParseResult<()> {
626 match self.peek_char()? {
627 Some(b'\r') => {
628 self.consume_char()?;
629 if self.peek_char()? == Some(b'\n') {
631 self.consume_char()?;
632 }
633 Ok(())
634 }
635 Some(b'\n') => {
636 self.consume_char()?;
637 Ok(())
638 }
639 _ => Err(ParseError::SyntaxError {
640 position: self.position,
641 message: "Expected newline".to_string(),
642 }),
643 }
644 }
645
646 pub fn peek_byte(&mut self) -> ParseResult<u8> {
649 match self.peek_char()? {
650 Some(b) => Ok(b),
651 None => Err(ParseError::UnexpectedToken {
652 expected: "byte".to_string(),
653 found: "EOF".to_string(),
654 }),
655 }
656 }
657
658 pub fn read_byte(&mut self) -> ParseResult<u8> {
660 match self.consume_char()? {
661 Some(b) => Ok(b),
662 None => Err(ParseError::UnexpectedToken {
663 expected: "byte".to_string(),
664 found: "EOF".to_string(),
665 }),
666 }
667 }
668
669 pub fn seek(&mut self, pos: u64) -> ParseResult<()>
671 where
672 R: Seek,
673 {
674 self.reader.seek(SeekFrom::Start(pos))?;
675 self.position = pos as usize;
676 Ok(())
677 }
678
679 pub fn read_bytes(&mut self, n: usize) -> ParseResult<Vec<u8>> {
680 let mut bytes = Vec::with_capacity(n);
681
682 if self.peek_buffer.is_some() && n > 0 {
684 if let Some(byte) = self.consume_char()? {
685 bytes.push(byte);
686 }
687 }
688
689 let remaining = n - bytes.len();
691 if remaining > 0 {
692 let mut rest = vec![0u8; remaining];
693 self.reader.read_exact(&mut rest)?;
694 self.position += remaining;
695 bytes.extend_from_slice(&rest);
696 }
697
698 Ok(bytes)
699 }
700
701 pub fn read_until_sequence(&mut self, sequence: &[u8]) -> ParseResult<Vec<u8>> {
703 let mut result = Vec::new();
704 let mut match_pos = 0;
705
706 while let Some(ch) = self.consume_char()? {
707 result.push(ch);
708
709 if ch == sequence[match_pos] {
710 match_pos += 1;
711 if match_pos == sequence.len() {
712 result.truncate(result.len() - sequence.len());
714 break;
715 }
716 } else if ch == sequence[0] {
717 match_pos = 1;
718 } else {
719 match_pos = 0;
720 }
721 }
722
723 if match_pos < sequence.len() {
724 return Err(ParseError::SyntaxError {
725 position: self.position,
726 message: format!("Sequence {sequence:?} not found"),
727 });
728 }
729
730 Ok(result)
731 }
732
    /// Logical byte offset consumed so far (used in error reports).
    pub fn position(&self) -> usize {
        self.position
    }
737
    /// Pushes a token back; `next_token` returns pushed tokens in LIFO order.
    pub fn push_token(&mut self, token: Token) {
        self.token_buffer.push(token);
    }
742
743 pub fn expect_keyword(&mut self, keyword: &str) -> ParseResult<()> {
745 let token = self.next_token()?;
746 match (keyword, &token) {
747 ("endstream", Token::EndStream) => Ok(()),
748 ("stream", Token::Stream) => Ok(()),
749 ("endobj", Token::EndObj) => Ok(()),
750 ("obj", Token::Obj) => Ok(()),
751 ("startxref", Token::StartXRef) => Ok(()),
752 _ => Err(ParseError::UnexpectedToken {
753 expected: format!("keyword '{keyword}'"),
754 found: format!("{token:?}"),
755 }),
756 }
757 }
758
    /// Scans ahead (without consuming) for `keyword` within the next
    /// `max_bytes`, returning the byte offset from the current reader
    /// position to the start of the match, or `None`.
    ///
    /// NOTE(review): the scan reads the raw reader, so a byte cached in
    /// `peek_buffer` is never examined — a match beginning at that byte is
    /// missed, and the returned offset is relative to the reader position
    /// rather than `self.position`. Verify this matches caller expectations.
    pub fn find_keyword_ahead(
        &mut self,
        keyword: &str,
        max_bytes: usize,
    ) -> ParseResult<Option<usize>>
    where
        R: Seek,
    {
        use std::io::{Read, Seek, SeekFrom};

        // Remember current state so the scan has no observable side effect.
        let current_pos = self.reader.stream_position()?;
        let start_buffer_state = self.peek_buffer;

        let keyword_bytes = keyword.as_bytes();
        let mut bytes_read = 0;
        let mut match_buffer = Vec::new();

        while bytes_read < max_bytes {
            let mut byte = [0u8; 1];
            match self.reader.read_exact(&mut byte) {
                Ok(_) => {
                    bytes_read += 1;
                    match_buffer.push(byte[0]);

                    // Keep a sliding window of the last keyword-length bytes.
                    if match_buffer.len() > keyword_bytes.len() {
                        match_buffer.remove(0);
                    }

                    if match_buffer.len() == keyword_bytes.len() && match_buffer == keyword_bytes {
                        // Found: rewind and restore lookahead state.
                        self.reader.seek(SeekFrom::Start(current_pos))?;
                        self.peek_buffer = start_buffer_state;
                        return Ok(Some(bytes_read - keyword_bytes.len()));
                    }
                }
                // EOF (or I/O error) ends the scan early.
                Err(_) => break,
            }
        }

        // Not found: rewind and restore lookahead state.
        self.reader.seek(SeekFrom::Start(current_pos))?;
        self.peek_buffer = start_buffer_state;
        Ok(None)
    }
809
    /// Returns up to `n` upcoming bytes without consuming them, rewinding
    /// the reader afterwards. Fewer than `n` bytes may be returned at EOF.
    ///
    /// NOTE(review): reads the raw reader, so a byte cached in `peek_buffer`
    /// is not included — the returned slice starts one byte later whenever
    /// a lookahead byte is buffered. Verify against callers.
    pub fn peek_ahead(&mut self, n: usize) -> ParseResult<Vec<u8>>
    where
        R: Seek,
    {
        use std::io::{Read, Seek, SeekFrom};

        // Remember current state so the peek has no observable side effect.
        let current_pos = self.reader.stream_position()?;
        let start_buffer_state = self.peek_buffer;

        // A short read (not an error) is possible near EOF.
        let mut bytes = vec![0u8; n];
        let bytes_read = self.reader.read(&mut bytes)?;
        bytes.truncate(bytes_read);

        self.reader.seek(SeekFrom::Start(current_pos))?;
        self.peek_buffer = start_buffer_state;

        Ok(bytes)
    }
832
    /// Snapshots the reader offset and lookahead byte for `restore_position`.
    pub fn save_position(&mut self) -> ParseResult<(u64, Option<u8>)>
    where
        R: Seek,
    {
        use std::io::Seek;
        let pos = self.reader.stream_position()?;
        Ok((pos, self.peek_buffer))
    }
842
    /// Restores a snapshot taken by `save_position` (reader offset,
    /// lookahead byte, and logical position).
    pub fn restore_position(&mut self, saved: (u64, Option<u8>)) -> ParseResult<()>
    where
        R: Seek,
    {
        use std::io::{Seek, SeekFrom};
        self.reader.seek(SeekFrom::Start(saved.0))?;
        self.peek_buffer = saved.1;
        // NOTE(review): logical position is set to the raw reader offset,
        // which ignores any restored lookahead byte — confirm acceptable.
        self.position = saved.0 as usize;
        Ok(())
    }
854
    /// Returns the next token without consuming it, by lexing and then
    /// rewinding. NOTE(review): side effects of lexing (e.g. warnings
    /// pushed in lenient mode) are not rolled back — confirm acceptable.
    pub fn peek_token(&mut self) -> ParseResult<Token>
    where
        R: Seek,
    {
        let saved_pos = self.save_position()?;
        let token = self.next_token()?;
        self.restore_position(saved_pos)?;
        Ok(token)
    }
865
    /// Attempts to decode a literal string's raw bytes, repairing
    /// problematic characters, and returns the (possibly rewritten) bytes.
    ///
    /// Strings containing C1-range (0x80–0x9F) or control bytes get an
    /// aggressive lenient decode biased toward Windows-1252; otherwise the
    /// configured options are used. On decoder failure, a byte-level
    /// fallback substitution is applied in lenient mode, else an error.
    fn process_string_with_encoding_recovery(
        &mut self,
        string_bytes: &[u8],
    ) -> ParseResult<Vec<u8>> {
        use super::encoding::{CharacterDecoder, EncodingOptions, EncodingType, EnhancedDecoder};

        // C1 range, BEL, or control bytes other than TAB/LF/CR.
        let has_problematic_chars = string_bytes.iter().any(|&b| {
            (0x80..=0x9F).contains(&b)
                || b == 0x07
                || (b <= 0x1F && b != 0x09 && b != 0x0A && b != 0x0D)
        });

        let decoder = EnhancedDecoder::new();

        let encoding_options = if has_problematic_chars {
            // Aggressive recovery: force lenient Windows-1252 with a
            // replacement budget scaled to the string length.
            EncodingOptions {
                lenient_mode: true,
                preferred_encoding: Some(EncodingType::Windows1252),
                max_replacements: std::cmp::max(100, string_bytes.len() / 10),
                log_issues: self.options.collect_warnings,
            }
        } else {
            EncodingOptions {
                lenient_mode: self.options.lenient_encoding,
                preferred_encoding: self.options.preferred_encoding,
                max_replacements: 50,
                log_issues: self.options.collect_warnings,
            }
        };

        match decoder.decode(string_bytes, &encoding_options) {
            Ok(result) => {
                // Record a warning when anything was repaired.
                if (result.replacement_count > 0 || has_problematic_chars)
                    && self.options.collect_warnings
                {
                    self.warnings.push(ParseWarning::InvalidEncoding {
                        position: self.position,
                        // Long recovered text is truncated to ~47 chars,
                        // respecting UTF-8 char boundaries.
                        recovered_text: if result.text.len() > 50 {
                            let truncate_at = result
                                .text
                                .char_indices()
                                .map(|(i, _)| i)
                                .nth(47)
                                .unwrap_or_else(|| {
                                    // Fewer than 48 chars but > 50 bytes:
                                    // back off to the nearest boundary.
                                    let limit = result.text.len().min(47);
                                    let mut pos = limit;
                                    while pos > 0 && !result.text.is_char_boundary(pos) {
                                        pos -= 1;
                                    }
                                    pos
                                });

                            let safe_text = if truncate_at <= result.text.len()
                                && result.text.is_char_boundary(truncate_at)
                            {
                                result.text[..truncate_at].to_string()
                            } else {
                                // Defensive fallback: take chars directly.
                                result.text.chars().take(47).collect::<String>()
                            };

                            format!(
                                "{}... (truncated, {} chars total)",
                                safe_text,
                                result.text.chars().count()
                            )
                        } else {
                            result.text.clone()
                        },
                        encoding_used: result.detected_encoding,
                        replacement_count: result.replacement_count,
                    });
                }

                Ok(result.text.into_bytes())
            }
            Err(encoding_error) => {
                if self.options.lenient_encoding {
                    // Decoder gave up: substitute bytes mechanically.
                    let fallback_result = self.apply_fallback_encoding_strategy(string_bytes);

                    if self.options.collect_warnings {
                        self.warnings.push(ParseWarning::InvalidEncoding {
                            position: self.position,
                            recovered_text: format!(
                                "Fallback strategy applied: {} -> {} chars",
                                string_bytes.len(),
                                fallback_result.len()
                            ),
                            encoding_used: None,
                            replacement_count: string_bytes.len(),
                        });
                    }
                    Ok(fallback_result)
                } else {
                    Err(ParseError::CharacterEncodingError {
                        position: self.position,
                        message: format!(
                            "Failed to decode string with any supported encoding: {encoding_error}"
                        ),
                    })
                }
            }
        }
    }
980
981 fn apply_fallback_encoding_strategy(&self, string_bytes: &[u8]) -> Vec<u8> {
983 let mut result = Vec::with_capacity(string_bytes.len());
984
985 for &byte in string_bytes {
986 match byte {
987 0x00..=0x08 | 0x0B | 0x0C | 0x0E..=0x1F => {
989 result.push(b' '); }
991 0x80..=0x9F => {
992 let replacement = match byte {
994 0x80 => b'E', 0x81 => b' ', 0x82 => b',', 0x83 => b'f', 0x84 => b'"', 0x85 => b'.', 0x86 => b'+', 0x87 => b'+', 0x88 => b'^', 0x89 => b'%', 0x8A => b'S', 0x8B => b'<', 0x8C => b'O', 0x8D => b' ', 0x8E => b'Z', 0x8F => b' ', 0x90 => b' ', 0x91 => b'\'', 0x92 => b'\'', 0x93 => b'"', 0x94 => b'"', 0x95 => b'*', 0x96 => b'-', 0x97 => b'-', 0x98 => b'~', 0x99 => b'T', 0x9A => b's', 0x9B => b'>', 0x9C => b'o', 0x9D => b' ', 0x9E => b'z', 0x9F => b'Y', _ => b'?', };
1028 result.push(replacement);
1029 }
1030 _ => {
1031 result.push(byte); }
1033 }
1034 }
1035
1036 result
1037 }
1038
1039 fn is_problematic_encoding_char(&self, ch: u8) -> bool {
1041 (0x80..=0x9F).contains(&ch) ||
1043 ch == 0x07 || (ch <= 0x1F && ch != 0x09 && ch != 0x0A && ch != 0x0D) || (self.options.lenient_syntax && ch >= 0xA0) }
1048
    /// Recovery path for a problematic byte encountered where a token was
    /// expected: in lenient-encoding mode the byte is skipped (with an
    /// optional warning) and lexing retries; otherwise it is a hard error
    /// with a descriptive message.
    fn handle_encoding_char_in_token_stream(&mut self, ch: u8) -> ParseResult<Token> {
        if self.options.lenient_encoding {
            self.consume_char()?;

            if self.options.collect_warnings {
                // Classify the byte for the warning text.
                let replacement_char = match ch {
                    0x07 => "bell",
                    0x00..=0x1F => "control",
                    0x80..=0x9F => "latin1-supplement",
                    _ => "unknown",
                };

                self.warnings.push(ParseWarning::InvalidEncoding {
                    position: self.position,
                    recovered_text: format!(
                        "Skipped problematic {replacement_char} character (0x{ch:02X})"
                    ),
                    encoding_used: None,
                    replacement_count: 1,
                });
            }

            // Retry lexing from the next non-whitespace byte.
            self.skip_whitespace()?;
            if let Ok(Some(_)) = self.peek_char() {
                self.next_token()
            } else {
                // Note: EOF after a skipped byte is an error here, unlike
                // the Token::Eof returned by next_token at normal EOF.
                Err(ParseError::SyntaxError {
                    position: self.position,
                    message: "Unexpected end of file after problematic character".to_string(),
                })
            }
        } else {
            let char_description = match ch {
                0x07 => "Bell character (\\u{07})".to_string(),
                0x00..=0x1F => format!("Control character (\\u{{{ch:02X}}})"),
                0x80..=0x9F => format!("Latin-1 supplement character (\\u{{{ch:02X}}})"),
                _ => format!("Problematic character (\\u{{{ch:02X}}})"),
            };

            Err(ParseError::CharacterEncodingError {
                position: self.position,
                message: format!(
                    "Unexpected character: {char_description} - Consider using lenient parsing mode"
                ),
            })
        }
    }
1101}
1102
1103#[cfg(test)]
1104mod tests {
1105 use super::*;
1106 use std::io::Cursor;
1107
    // One token of each scalar kind lexes correctly, then Eof.
    #[test]
    fn test_lexer_basic_tokens() {
        let input = b"123 -456 3.14 true false null /Name";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(-456));
        assert_eq!(lexer.next_token().unwrap(), Token::Real(3.14));
        assert_eq!(lexer.next_token().unwrap(), Token::Boolean(true));
        assert_eq!(lexer.next_token().unwrap(), Token::Boolean(false));
        assert_eq!(lexer.next_token().unwrap(), Token::Null);
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Name".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::Eof);
    }
1123
    // Leading '-' is honoured for both integers and reals.
    #[test]
    fn test_lexer_negative_numbers() {
        let input = b"-123 -45.67";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(-123));
        assert_eq!(lexer.next_token().unwrap(), Token::Real(-45.67));
    }
1133
    // Literal and hex strings both decode to the same byte representation.
    #[test]
    fn test_lexer_strings() {
        let input = b"(Hello World) <48656C6C6F>";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(
            lexer.next_token().unwrap(),
            Token::String(b"Hello World".to_vec())
        );
        assert_eq!(
            lexer.next_token().unwrap(),
            Token::String(b"Hello".to_vec())
        );
    }
1148
    // '<<' and '>>' delimit a dictionary of name tokens.
    #[test]
    fn test_lexer_dictionaries() {
        let input = b"<< /Type /Page >>";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
        assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
    }
1159
    // '[' and ']' delimit an array of integers.
    #[test]
    fn test_lexer_arrays() {
        let input = b"[1 2 3]";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(2));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(3));
        assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
    }
1171
    // The lexer emits "<num> <gen> R" as separate tokens; 'R' comes out as
    // Name("R") (reference assembly is left to the parser).
    #[test]
    fn test_lexer_references() {
        let input = b"1 0 R 25 1 R";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
        match lexer.next_token().unwrap() {
            Token::Name(s) if s == "R" => {}
            other => panic!("Expected R token, got {other:?}"),
        }

        assert_eq!(lexer.next_token().unwrap(), Token::Integer(25));
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
        match lexer.next_token().unwrap() {
            Token::Name(s) if s == "R" => {}
            other => panic!("Expected R token, got {other:?}"),
        }
    }
1193
    // Comments capture text after '%' up to (not including) the newline.
    #[test]
    fn test_lexer_comments() {
        let input = b"%PDF-1.7\n123";
        let mut lexer = Lexer::new(Cursor::new(input));

        assert_eq!(
            lexer.next_token().unwrap(),
            Token::Comment("PDF-1.7".to_string())
        );
        assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
    }
1205
1206 mod comprehensive_tests {
1208 use super::*;
1209 use std::io::Cursor;
1210
        // Token's derived Debug output includes variant name and payload.
        #[test]
        fn test_token_debug_trait() {
            let token = Token::Integer(42);
            let debug_str = format!("{token:?}");
            assert!(debug_str.contains("Integer"));
            assert!(debug_str.contains("42"));
        }
1218
        // Cloned tokens compare equal to the original.
        #[test]
        fn test_token_clone() {
            let token = Token::String(b"hello".to_vec());
            let cloned = token.clone();
            assert_eq!(token, cloned);
        }
1225
        // Derived PartialEq distinguishes payloads and variants.
        #[test]
        fn test_token_equality() {
            assert_eq!(Token::Integer(42), Token::Integer(42));
            assert_ne!(Token::Integer(42), Token::Integer(43));
            assert_eq!(Token::Boolean(true), Token::Boolean(true));
            assert_ne!(Token::Boolean(true), Token::Boolean(false));
            assert_eq!(Token::Null, Token::Null);
            assert_ne!(Token::Null, Token::Integer(0));
        }
1235
        // Empty input yields Eof immediately, not an error.
        #[test]
        fn test_lexer_empty_input() {
            let input = b"";
            let mut lexer = Lexer::new(Cursor::new(input));
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }
1242
        // Whitespace-only input is skipped entirely, yielding Eof.
        #[test]
        fn test_lexer_whitespace_only() {
            let input = b" \t\n\r ";
            let mut lexer = Lexer::new(Cursor::new(input));
            assert_eq!(lexer.next_token().unwrap(), Token::Eof);
        }
1249
        // Zero, explicit '+', negative zero, and large values all lex.
        #[test]
        fn test_lexer_integer_edge_cases() {
            let input = b"0 +123 -0 9876543210";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
            assert_eq!(lexer.next_token().unwrap(), Token::Integer(9876543210));
        }
1260
        // Reals: signs, leading dot (.5), and trailing dot (5.) all parse.
        #[test]
        fn test_lexer_real_edge_cases() {
            let input = b"0.0 +3.14 -2.71828 .5 5. 123.456789";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Real(0.0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(3.14));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(-2.71828));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(0.5));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(5.0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(123.456789));
        }
1273
        // Exponent forms (non-standard PDF, accepted for robustness) lex as Real.
        #[test]
        fn test_lexer_scientific_notation() {
            let input = b"1.23e10 -4.56E-5 1e0 2E+3";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Real(1.23e10));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(-4.56e-5));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(1e0));
            assert_eq!(lexer.next_token().unwrap(), Token::Real(2e3));
        }
1284
        // Standard escapes decode; unknown escapes (\") keep the byte only.
        #[test]
        fn test_lexer_string_literal_escapes() {
            let input = b"(Hello\\nWorld) (Tab\\tChar) (Quote\\\"Mark) (Backslash\\\\)";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello\nWorld".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Tab\tChar".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Quote\"Mark".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Backslash\\".to_vec())
            );
        }
1307
        // Balanced inner parentheses are preserved in the string content.
        #[test]
        fn test_lexer_string_literal_nested_parens() {
            let input = b"(Nested (parentheses) work)";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Nested (parentheses) work".to_vec())
            );
        }
1318
        // Empty literal string yields an empty byte vector.
        #[test]
        fn test_lexer_string_literal_empty() {
            let input = b"()";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::String(b"".to_vec()));
        }
1326
        // Hex strings decode pairwise; '<>' is the empty string.
        #[test]
        fn test_lexer_hexadecimal_strings() {
            let input = b"<48656C6C6F> <20576F726C64> <>";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello".to_vec())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b" World".to_vec())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::String(b"".to_vec()));
        }
1342
        // An odd digit count is padded with a trailing '0' per the PDF spec.
        #[test]
        fn test_lexer_hexadecimal_strings_odd_length() {
            let input = b"<48656C6C6F2> <1> <ABC>";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello ".to_vec())
            );
            assert_eq!(lexer.next_token().unwrap(), Token::String(b"\x10".to_vec()));
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"\xAB\xC0".to_vec())
            );
        }
1359
        // Whitespace between hex digits is ignored.
        #[test]
        fn test_lexer_hexadecimal_strings_whitespace() {
            let input = b"<48 65 6C 6C 6F>";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(
                lexer.next_token().unwrap(),
                Token::String(b"Hello".to_vec())
            );
        }
1370
        // A sequence of plain names lexes correctly.
        #[test]
        fn test_lexer_names() {
            let input = b"/Type /Page /Root /Kids /Count /MediaBox";
            let mut lexer = Lexer::new(Cursor::new(input));

            assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Root".to_string()));
            assert_eq!(lexer.next_token().unwrap(), Token::Name("Kids".to_string()));
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("Count".to_string())
            );
            assert_eq!(
                lexer.next_token().unwrap(),
                Token::Name("MediaBox".to_string())
            );
        }
1389
1390 #[test]
1391 fn test_lexer_names_with_special_chars() {
1392 let input = b"/Name#20with#20spaces /Name#2Fwith#2Fslashes";
1393 let mut lexer = Lexer::new(Cursor::new(input));
1394
1395 assert_eq!(
1396 lexer.next_token().unwrap(),
1397 Token::Name("Name with spaces".to_string())
1398 );
1399 assert_eq!(
1400 lexer.next_token().unwrap(),
1401 Token::Name("Name/with/slashes".to_string())
1402 );
1403 }
1404
1405 #[test]
1406 fn test_lexer_names_edge_cases() {
1407 let input = b"/ /A /123 /true /false /null";
1408 let mut lexer = Lexer::new(Cursor::new(input));
1409
1410 assert_eq!(lexer.next_token().unwrap(), Token::Name("".to_string()));
1411 assert_eq!(lexer.next_token().unwrap(), Token::Name("A".to_string()));
1412 assert_eq!(lexer.next_token().unwrap(), Token::Name("123".to_string()));
1413 assert_eq!(lexer.next_token().unwrap(), Token::Name("true".to_string()));
1414 assert_eq!(
1415 lexer.next_token().unwrap(),
1416 Token::Name("false".to_string())
1417 );
1418 assert_eq!(lexer.next_token().unwrap(), Token::Name("null".to_string()));
1419 }
1420
1421 #[test]
1422 fn test_lexer_nested_dictionaries() {
1423 let input = b"<< /Type /Page /Resources << /Font << /F1 123 0 R >> >> >>";
1424 let mut lexer = Lexer::new(Cursor::new(input));
1425
1426 assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1427 assert_eq!(lexer.next_token().unwrap(), Token::Name("Type".to_string()));
1428 assert_eq!(lexer.next_token().unwrap(), Token::Name("Page".to_string()));
1429 assert_eq!(
1430 lexer.next_token().unwrap(),
1431 Token::Name("Resources".to_string())
1432 );
1433 assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1434 assert_eq!(lexer.next_token().unwrap(), Token::Name("Font".to_string()));
1435 assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1436 assert_eq!(lexer.next_token().unwrap(), Token::Name("F1".to_string()));
1437 assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
1438 assert_eq!(lexer.next_token().unwrap(), Token::Integer(0));
1439 assert_eq!(lexer.next_token().unwrap(), Token::Name("R".to_string()));
1440 assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1441 assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1442 assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1443 }
1444
1445 #[test]
1446 fn test_lexer_nested_arrays() {
1447 let input = b"[[1 2] [3 4] [5 [6 7]]]";
1448 let mut lexer = Lexer::new(Cursor::new(input));
1449
1450 assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1451 assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1452 assert_eq!(lexer.next_token().unwrap(), Token::Integer(1));
1453 assert_eq!(lexer.next_token().unwrap(), Token::Integer(2));
1454 assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1455 assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1456 assert_eq!(lexer.next_token().unwrap(), Token::Integer(3));
1457 assert_eq!(lexer.next_token().unwrap(), Token::Integer(4));
1458 assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1459 assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1460 assert_eq!(lexer.next_token().unwrap(), Token::Integer(5));
1461 assert_eq!(lexer.next_token().unwrap(), Token::ArrayStart);
1462 assert_eq!(lexer.next_token().unwrap(), Token::Integer(6));
1463 assert_eq!(lexer.next_token().unwrap(), Token::Integer(7));
1464 assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1465 assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1466 assert_eq!(lexer.next_token().unwrap(), Token::ArrayEnd);
1467 }
1468
1469 #[test]
1470 fn test_lexer_mixed_content() {
1471 let input = b"<< /Type /Page /MediaBox [0 0 612 792] /Resources << /Font << /F1 << /Type /Font /Subtype /Type1 >> >> >> >>";
1472 let mut lexer = Lexer::new(Cursor::new(input));
1473
1474 let mut tokens = Vec::new();
1476 loop {
1477 match lexer.next_token().unwrap() {
1478 Token::Eof => break,
1479 token => tokens.push(token),
1480 }
1481 }
1482 assert!(tokens.len() > 10);
1483 }
1484
1485 #[test]
1486 fn test_lexer_keywords() {
1487 let input = b"obj endobj stream endstream startxref";
1488 let mut lexer = Lexer::new(Cursor::new(input));
1489
1490 assert_eq!(lexer.next_token().unwrap(), Token::Obj);
1491 assert_eq!(lexer.next_token().unwrap(), Token::EndObj);
1492 assert_eq!(lexer.next_token().unwrap(), Token::Stream);
1493 assert_eq!(lexer.next_token().unwrap(), Token::EndStream);
1494 assert_eq!(lexer.next_token().unwrap(), Token::StartXRef);
1495 }
1496
1497 #[test]
1498 fn test_lexer_multiple_comments() {
1499 let input = b"%First comment\n%Second comment\n123";
1500 let mut lexer = Lexer::new(Cursor::new(input));
1501
1502 assert_eq!(
1503 lexer.next_token().unwrap(),
1504 Token::Comment("First comment".to_string())
1505 );
1506 assert_eq!(
1507 lexer.next_token().unwrap(),
1508 Token::Comment("Second comment".to_string())
1509 );
1510 assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
1511 }
1512
1513 #[test]
1514 fn test_lexer_comment_without_newline() {
1515 let input = b"%Comment at end";
1516 let mut lexer = Lexer::new(Cursor::new(input));
1517
1518 assert_eq!(
1519 lexer.next_token().unwrap(),
1520 Token::Comment("Comment at end".to_string())
1521 );
1522 assert_eq!(lexer.next_token().unwrap(), Token::Eof);
1523 }
1524
1525 #[test]
1526 fn test_lexer_special_characters_in_streams() {
1527 let input = b"<< /Length 5 >> stream\nHello endstream";
1528 let mut lexer = Lexer::new(Cursor::new(input));
1529
1530 assert_eq!(lexer.next_token().unwrap(), Token::DictStart);
1531 assert_eq!(
1532 lexer.next_token().unwrap(),
1533 Token::Name("Length".to_string())
1534 );
1535 assert_eq!(lexer.next_token().unwrap(), Token::Integer(5));
1536 assert_eq!(lexer.next_token().unwrap(), Token::DictEnd);
1537 assert_eq!(lexer.next_token().unwrap(), Token::Stream);
1538 }
1540
1541 #[test]
1542 fn test_lexer_push_token() {
1543 let input = b"123 456";
1544 let mut lexer = Lexer::new(Cursor::new(input));
1545
1546 let token1 = lexer.next_token().unwrap();
1547 assert_eq!(token1, Token::Integer(123));
1548
1549 let token2 = lexer.next_token().unwrap();
1550 assert_eq!(token2, Token::Integer(456));
1551
1552 lexer.push_token(token2.clone());
1554
1555 let token3 = lexer.next_token().unwrap();
1557 assert_eq!(token3, token2);
1558
1559 let token4 = lexer.next_token().unwrap();
1561 assert_eq!(token4, Token::Eof);
1562 }
1563
1564 #[test]
1565 fn test_lexer_push_multiple_tokens() {
1566 let input = b"123";
1567 let mut lexer = Lexer::new(Cursor::new(input));
1568
1569 let original_token = lexer.next_token().unwrap();
1570 assert_eq!(original_token, Token::Integer(123));
1571
1572 lexer.push_token(Token::Boolean(true));
1574 lexer.push_token(Token::Boolean(false));
1575 lexer.push_token(Token::Null);
1576
1577 assert_eq!(lexer.next_token().unwrap(), Token::Null);
1579 assert_eq!(lexer.next_token().unwrap(), Token::Boolean(false));
1580 assert_eq!(lexer.next_token().unwrap(), Token::Boolean(true));
1581 assert_eq!(lexer.next_token().unwrap(), Token::Eof);
1582 }
1583
1584 #[test]
1585 fn test_lexer_read_newline() {
1586 let input = b"123\n456\r\n789";
1587 let mut lexer = Lexer::new(Cursor::new(input));
1588
1589 let digits1 = lexer.read_digits().unwrap();
1591 assert_eq!(digits1, "123");
1592 assert!(lexer.read_newline().is_ok());
1593
1594 let digits2 = lexer.read_digits().unwrap();
1596 assert_eq!(digits2, "456");
1597 assert!(lexer.read_newline().is_ok());
1598
1599 let digits3 = lexer.read_digits().unwrap();
1601 assert_eq!(digits3, "789");
1602 }
1603
1604 #[test]
1605 fn test_lexer_read_bytes() {
1606 let input = b"Hello World";
1607 let mut lexer = Lexer::new(Cursor::new(input));
1608
1609 let bytes = lexer.read_bytes(5).unwrap();
1610 assert_eq!(bytes, b"Hello");
1611
1612 let bytes = lexer.read_bytes(6).unwrap();
1613 assert_eq!(bytes, b" World");
1614 }
1615
1616 #[test]
1617 fn test_lexer_read_until_sequence() {
1618 let input = b"Hello endstream World";
1619 let mut lexer = Lexer::new(Cursor::new(input));
1620
1621 let result = lexer.read_until_sequence(b"endstream").unwrap();
1622 assert_eq!(result, b"Hello ");
1623
1624 let rest = lexer.read_digits().unwrap();
1626 assert_eq!(rest, ""); }
1628
1629 #[test]
1630 fn test_lexer_read_until_sequence_not_found() {
1631 let input = b"Hello World";
1632 let mut lexer = Lexer::new(Cursor::new(input));
1633
1634 let result = lexer.read_until_sequence(b"notfound");
1635 assert!(result.is_err());
1636 }
1637
1638 #[test]
1639 fn test_lexer_position_tracking() {
1640 let input = b"123 456";
1641 let mut lexer = Lexer::new(Cursor::new(input));
1642
1643 let initial_pos = lexer.position();
1644 assert_eq!(initial_pos, 0);
1645
1646 lexer.next_token().unwrap(); let pos_after_first = lexer.position();
1648 assert!(pos_after_first > initial_pos);
1649
1650 lexer.next_token().unwrap(); let pos_after_second = lexer.position();
1652 assert!(pos_after_second > pos_after_first);
1653 }
1654
1655 #[test]
1656 fn test_lexer_large_numbers() {
1657 let input = b"2147483647 -2147483648 9223372036854775807 -9223372036854775808";
1658 let mut lexer = Lexer::new(Cursor::new(input));
1659
1660 assert_eq!(lexer.next_token().unwrap(), Token::Integer(2147483647));
1661 assert_eq!(lexer.next_token().unwrap(), Token::Integer(-2147483648));
1662 assert_eq!(
1663 lexer.next_token().unwrap(),
1664 Token::Integer(9223372036854775807)
1665 );
1666 assert_eq!(
1667 lexer.next_token().unwrap(),
1668 Token::Integer(-9223372036854775808)
1669 );
1670 }
1671
1672 #[test]
1673 fn test_lexer_very_long_string() {
1674 let long_str = "A".repeat(1000);
1675 let input = format!("({long_str})");
1676 let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1677
1678 if let Token::String(s) = lexer.next_token().unwrap() {
1679 assert_eq!(s.len(), 1000);
1680 assert_eq!(s, long_str.as_bytes());
1681 } else {
1682 panic!("Expected string token");
1683 }
1684 }
1685
1686 #[test]
1687 fn test_lexer_very_long_name() {
1688 let long_name = "A".repeat(500);
1689 let input = format!("/{long_name}");
1690 let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1691
1692 if let Token::Name(name) = lexer.next_token().unwrap() {
1693 assert_eq!(name.len(), 500);
1694 assert_eq!(name, long_name);
1695 } else {
1696 panic!("Expected name token");
1697 }
1698 }
1699
1700 #[test]
1701 fn test_lexer_error_handling_invalid_hex() {
1702 let input = b"<48656C6C6FG>";
1703 let mut lexer = Lexer::new(Cursor::new(input));
1704
1705 let result = lexer.next_token();
1707 assert!(result.is_ok() || result.is_err()); }
1709
1710 #[test]
1711 fn test_lexer_all_token_types() {
1712 let input = b"true false null 123 -456 3.14 (string) <48656C6C6F> /Name [ ] << >> obj endobj stream endstream startxref % comment\n";
1713 let mut lexer = Lexer::new(Cursor::new(input));
1714
1715 let mut token_types = Vec::new();
1716 loop {
1717 match lexer.next_token().unwrap() {
1718 Token::Eof => break,
1719 token => token_types.push(std::mem::discriminant(&token)),
1720 }
1721 }
1722
1723 assert!(token_types.len() > 10);
1725 }
1726
1727 #[test]
1728 fn test_lexer_performance() {
1729 let input = "123 456 789 ".repeat(1000);
1730 let mut lexer = Lexer::new(Cursor::new(input.as_bytes()));
1731
1732 let start_time = std::time::Instant::now();
1733 let mut count = 0;
1734 loop {
1735 match lexer.next_token().unwrap() {
1736 Token::Eof => break,
1737 _ => count += 1,
1738 }
1739 }
1740 let elapsed = start_time.elapsed();
1741
1742 assert_eq!(count, 3000); assert!(elapsed.as_millis() < 1000); }
1745 }
1746
1747 #[test]
1748 fn test_lexer_find_keyword_ahead() {
1749 let input = b"some data here endstream more data";
1750 let mut lexer = Lexer::new(Cursor::new(input));
1751
1752 let result = lexer.find_keyword_ahead("endstream", 100);
1754 assert!(result.is_ok());
1755 assert_eq!(result.unwrap(), Some(15)); let result2 = lexer.find_keyword_ahead("notfound", 100);
1759 assert!(result2.is_ok());
1760 assert_eq!(result2.unwrap(), None);
1761
1762 let result3 = lexer.find_keyword_ahead("endstream", 10);
1764 assert!(result3.is_ok());
1765 assert_eq!(result3.unwrap(), None); }
1767
1768 #[test]
1769 fn test_lexer_peek_token() {
1770 let input = b"123 456 /Name";
1771 let mut lexer = Lexer::new(Cursor::new(input));
1772
1773 let peeked = lexer.peek_token();
1775 assert!(peeked.is_ok());
1776 assert_eq!(peeked.unwrap(), Token::Integer(123));
1777
1778 let next = lexer.next_token();
1780 assert!(next.is_ok());
1781 assert_eq!(next.unwrap(), Token::Integer(123));
1782
1783 assert_eq!(lexer.peek_token().unwrap(), Token::Integer(456));
1785 assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1786
1787 assert_eq!(lexer.peek_token().unwrap(), Token::Name("Name".to_string()));
1788 assert_eq!(lexer.next_token().unwrap(), Token::Name("Name".to_string()));
1789 }
1790
1791 #[test]
1792 fn test_lexer_expect_keyword() {
1793 let input = b"endstream obj endobj";
1794 let mut lexer = Lexer::new(Cursor::new(input));
1795
1796 assert!(lexer.expect_keyword("endstream").is_ok());
1798
1799 assert!(lexer.expect_keyword("obj").is_ok());
1801
1802 let result = lexer.expect_keyword("stream");
1804 assert!(result.is_err());
1805 match result {
1806 Err(ParseError::UnexpectedToken { expected, found }) => {
1807 assert!(expected.contains("stream"));
1808 assert!(found.contains("EndObj"));
1809 }
1810 _ => panic!("Expected UnexpectedToken error"),
1811 }
1812 }
1813
1814 #[test]
1815 fn test_lexer_save_restore_position() {
1816 let input = b"123 456 789";
1817 let mut lexer = Lexer::new(Cursor::new(input));
1818
1819 assert_eq!(lexer.next_token().unwrap(), Token::Integer(123));
1821
1822 let saved = lexer.save_position();
1824 assert!(saved.is_ok());
1825 let saved_pos = saved.unwrap();
1826
1827 assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1829 assert_eq!(lexer.next_token().unwrap(), Token::Integer(789));
1830
1831 assert!(lexer.restore_position(saved_pos).is_ok());
1833
1834 assert_eq!(lexer.next_token().unwrap(), Token::Integer(456));
1836 }
1837
1838 #[test]
1839 fn test_lexer_character_encoding_recovery() {
1840 let input = b"(Caf\x80 \x91Hello\x92)"; let options = ParseOptions::lenient();
1843 let mut lexer = Lexer::new_with_options(Cursor::new(input), options);
1844
1845 match lexer.next_token().unwrap() {
1846 Token::String(bytes) => {
1847 let text = String::from_utf8_lossy(&bytes);
1849 tracing::debug!("Recovered text: {text}");
1850 assert!(!text.is_empty()); }
1852 other => panic!("Expected String token, got {other:?}"),
1853 }
1854
1855 let warnings = lexer.warnings();
1857 if !warnings.is_empty() {
1858 tracing::debug!("Encoding warnings: {warnings:?}");
1859 }
1860 }
1861
1862 fn lexer_no_encoding(data: &[u8]) -> Lexer<Cursor<&[u8]>> {
1864 let mut opts = ParseOptions::default();
1865 opts.lenient_encoding = false;
1866 Lexer::new_with_options(Cursor::new(data), opts)
1867 }
1868
1869 #[test]
1870 fn test_lexer_octal_escape_overflow_777_raw() {
1871 let mut lexer = lexer_no_encoding(b"(\\777)");
1875 match lexer.next_token().unwrap() {
1876 Token::String(bytes) => assert_eq!(bytes, vec![0xFF]),
1877 other => panic!("Expected String token, got {other:?}"),
1878 }
1879 }
1880
1881 #[test]
1882 fn test_lexer_octal_escape_overflow_400_raw() {
1883 let mut lexer = lexer_no_encoding(b"(\\400)");
1885 match lexer.next_token().unwrap() {
1886 Token::String(bytes) => assert_eq!(bytes, vec![0x00]),
1887 other => panic!("Expected String token, got {other:?}"),
1888 }
1889 }
1890
1891 #[test]
1892 fn test_lexer_octal_escape_max_valid_377_raw() {
1893 let mut lexer = lexer_no_encoding(b"(\\377)");
1895 match lexer.next_token().unwrap() {
1896 Token::String(bytes) => assert_eq!(bytes, vec![0xFF]),
1897 other => panic!("Expected String token, got {other:?}"),
1898 }
1899 }
1900
1901 #[test]
1902 fn test_lexer_octal_escape_overflow_mixed_raw() {
1903 let mut lexer = lexer_no_encoding(b"(A\\777B\\101C)");
1905 match lexer.next_token().unwrap() {
1906 Token::String(bytes) => {
1907 assert_eq!(bytes, vec![b'A', 0xFF, b'B', b'A', b'C']);
1908 }
1909 other => panic!("Expected String token, got {other:?}"),
1910 }
1911 }
1912
1913 #[test]
1914 fn test_lexer_octal_escape_overflow_no_panic_with_encoding() {
1915 let mut lexer = Lexer::new(Cursor::new(b"(\\777\\400\\577)" as &[u8]));
1917 match lexer.next_token().unwrap() {
1918 Token::String(bytes) => {
1919 assert!(!bytes.is_empty());
1922 }
1923 other => panic!("Expected String token, got {other:?}"),
1924 }
1925 }
1926}