fast_rustc_ap_rustc_lexer/lib.rs
//! Low-level Rust lexer.
//!
//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax;
//! for that see `librustc_parse::lexer`, which converts this basic token stream
//! into wide tokens used by the actual parser.
//!
//! The purpose of this crate is to convert raw sources into a labeled sequence
//! of well-known token types, so that building an actual Rust token stream will
//! be easier.
//!
//! The main entity of this crate is the [`TokenKind`] enum, which represents common
//! lexeme types.
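//!
//! A minimal usage sketch (the crate name `fast_rustc_ap_rustc_lexer` is assumed
//! from the file path; adjust the `use` path to the actual package name):
//!
//! ```
//! use fast_rustc_ap_rustc_lexer::{tokenize, TokenKind};
//!
//! // At this stage keywords such as `let` are still plain `Ident` tokens.
//! let kinds: Vec<TokenKind> = tokenize("let x = 1;").map(|token| token.kind).collect();
//! assert_eq!(kinds.len(), 8);
//! assert_eq!(kinds[0], TokenKind::Ident);
//! ```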

// We want to be able to build this crate with a stable compiler, so no
// `#![feature]` attributes should be added.

mod cursor;
pub mod unescape;

use self::LiteralKind::*;
use self::TokenKind::*;
use crate::cursor::{Cursor, EOF_CHAR};

/// Parsed token.
/// It doesn't contain information about data that has been parsed,
/// only the type of the token and its size.
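///
/// A small sketch of reading a token's fields (the crate name is assumed from
/// the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::{first_token, TokenKind};
///
/// let token = first_token("; rest of the input");
/// assert_eq!(token.kind, TokenKind::Semi);
/// assert_eq!(token.len, 1);
/// ```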
pub struct Token {
    pub kind: TokenKind,
    pub len: usize,
}

impl Token {
    fn new(kind: TokenKind, len: usize) -> Token {
        Token { kind, len }
    }
}

/// Enum representing common lexeme types.
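///
/// For example, a nested block comment missing one closing `*/` lexes as a single
/// unterminated `BlockComment` token (a sketch; the crate name is assumed from
/// the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::{first_token, TokenKind};
///
/// let token = first_token("/* /* */");
/// assert_eq!(token.kind, TokenKind::BlockComment { terminated: false });
/// assert_eq!(token.len, 8);
/// ```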
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TokenKind {
    // Multi-char tokens:
    /// "// comment"
    LineComment,
    /// "/* block comment */"
    /// Block comments can be nested, so a sequence like "/* /* */"
    /// will not be considered terminated and will result in a parsing error.
    BlockComment { terminated: bool },
    /// Any whitespace character sequence.
    Whitespace,
    /// "ident" or "continue"
    /// At this step keywords are also considered identifiers.
    Ident,
    /// "r#ident"
    RawIdent,
    /// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
    Literal { kind: LiteralKind, suffix_start: usize },
    /// "'a"
    Lifetime { starts_with_number: bool },

    // One-char tokens:
    /// ";"
    Semi,
    /// ","
    Comma,
    /// "."
    Dot,
    /// "("
    OpenParen,
    /// ")"
    CloseParen,
    /// "{"
    OpenBrace,
    /// "}"
    CloseBrace,
    /// "["
    OpenBracket,
    /// "]"
    CloseBracket,
    /// "@"
    At,
    /// "#"
    Pound,
    /// "~"
    Tilde,
    /// "?"
    Question,
    /// ":"
    Colon,
    /// "$"
    Dollar,
    /// "="
    Eq,
    /// "!"
    Not,
    /// "<"
    Lt,
    /// ">"
    Gt,
    /// "-"
    Minus,
    /// "&"
    And,
    /// "|"
    Or,
    /// "+"
    Plus,
    /// "*"
    Star,
    /// "/"
    Slash,
    /// "^"
    Caret,
    /// "%"
    Percent,

    /// Unknown token, not expected by the lexer, e.g. "№"
    Unknown,
}

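/// The kinds of literal carried by `TokenKind::Literal`.
///
/// A small sketch of the `empty_int` flag, which marks a base prefix with no
/// digits after it (the crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::{first_token, Base, LiteralKind, TokenKind};
///
/// let token = first_token("0x");
/// assert_eq!(
///     token.kind,
///     TokenKind::Literal {
///         kind: LiteralKind::Int { base: Base::Hexadecimal, empty_int: true },
///         suffix_start: 2,
///     }
/// );
/// ```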
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum LiteralKind {
    /// "12_u8", "0o100", "0b120i99"
    Int { base: Base, empty_int: bool },
    /// "12.34f32", "0b100.100"
    Float { base: Base, empty_exponent: bool },
    /// "'a'", "'\\'", "'''", "';"
    Char { terminated: bool },
    /// "b'a'", "b'\\'", "b'''", "b';"
    Byte { terminated: bool },
    /// ""abc"", ""abc"
    Str { terminated: bool },
    /// "b"abc"", "b"abc"
    ByteStr { terminated: bool },
    /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a"
    RawStr { n_hashes: usize, started: bool, terminated: bool },
    /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a"
    RawByteStr { n_hashes: usize, started: bool, terminated: bool },
}

/// Base of numeric literal encoding according to its prefix.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Base {
    /// Literal starts with "0b".
    Binary,
    /// Literal starts with "0o".
    Octal,
    /// Literal starts with "0x".
    Hexadecimal,
    /// Literal doesn't contain a prefix.
    Decimal,
}

/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
/// but a shebang isn't part of Rust syntax, so this function
/// skips the line if it starts with a shebang ("#!").
/// The line won't be skipped if it is valid Rust syntax
/// (e.g. "#![deny(missing_docs)]").
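///
/// A small sketch of both cases (the crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::strip_shebang;
///
/// assert!(strip_shebang("#!/usr/bin/env run-cargo-script\nfn main() {}").is_some());
/// assert!(strip_shebang("#![deny(missing_docs)]\nfn main() {}").is_none());
/// ```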
pub fn strip_shebang(input: &str) -> Option<usize> {
    debug_assert!(!input.is_empty());
    if !input.starts_with("#!") || input.starts_with("#![") {
        return None;
    }
    Some(input.find('\n').unwrap_or(input.len()))
}

/// Parses the first token from the provided input string.
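///
/// For instance, the integer literal below carries a `u32` suffix, and
/// `suffix_start` records where that suffix begins (a sketch; the crate name is
/// assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::{first_token, LiteralKind, TokenKind};
///
/// let token = first_token("123u32;");
/// assert_eq!(token.len, 6);
/// match token.kind {
///     TokenKind::Literal { kind: LiteralKind::Int { .. }, suffix_start } => {
///         // "123" is the value, "u32" is the suffix.
///         assert_eq!(suffix_start, 3);
///     }
///     _ => panic!("expected an integer literal"),
/// }
/// ```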
pub fn first_token(input: &str) -> Token {
    debug_assert!(!input.is_empty());
    Cursor::new(input).advance_token()
}

/// Creates an iterator that produces tokens from the input string.
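///
/// Token lengths can be used to slice the original source back out (a sketch;
/// the crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::tokenize;
///
/// let src = "fn main() {}";
/// let mut rest = src;
/// let mut texts = Vec::new();
/// for token in tokenize(src) {
///     let (text, tail) = rest.split_at(token.len);
///     texts.push(text);
///     rest = tail;
/// }
/// assert_eq!(texts, ["fn", " ", "main", "(", ")", " ", "{", "}"]);
/// ```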
pub fn tokenize(mut input: &str) -> impl Iterator<Item = Token> + '_ {
    std::iter::from_fn(move || {
        if input.is_empty() {
            return None;
        }
        let token = first_token(input);
        input = &input[token.len..];
        Some(token)
    })
}

/// True if `c` is considered whitespace according to the Rust language definition.
/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
/// for definitions of these classes.
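///
/// For example (the crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::is_whitespace;
///
/// assert!(is_whitespace('\t'));
/// assert!(is_whitespace('\u{2028}')); // LINE SEPARATOR
/// assert!(!is_whitespace('a'));
/// ```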
pub fn is_whitespace(c: char) -> bool {
    // This is Pattern_White_Space.
    //
    // Note that this set is stable (i.e., it doesn't change with different
    // Unicode versions), so it's ok to just hard-code the values.

    match c {
        // Usual ASCII suspects
        | '\u{0009}' // \t
        | '\u{000A}' // \n
        | '\u{000B}' // vertical tab
        | '\u{000C}' // form feed
        | '\u{000D}' // \r
        | '\u{0020}' // space

        // NEXT LINE from latin1
        | '\u{0085}'

        // Bidi markers
        | '\u{200E}' // LEFT-TO-RIGHT MARK
        | '\u{200F}' // RIGHT-TO-LEFT MARK

        // Dedicated whitespace characters from Unicode
        | '\u{2028}' // LINE SEPARATOR
        | '\u{2029}' // PARAGRAPH SEPARATOR
        => true,
        _ => false,
    }
}

/// True if `c` is valid as a first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of a valid identifier name.
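///
/// For example (the crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::is_id_start;
///
/// assert!(is_id_start('_'));
/// assert!(is_id_start('λ')); // non-ASCII XID_Start character
/// assert!(!is_id_start('1'));
/// ```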
pub fn is_id_start(c: char) -> bool {
    // This is XID_Start OR '_' (which formally is not a XID_Start).
    // We also add a fast path for ASCII idents.
    ('a' <= c && c <= 'z')
        || ('A' <= c && c <= 'Z')
        || c == '_'
        || (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}

/// True if `c` is valid as a non-first character of an identifier.
/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
/// a formal definition of a valid identifier name.
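///
/// For example, digits may continue an identifier but cannot start one (the
/// crate name is assumed from the file path):
/// ```
/// use fast_rustc_ap_rustc_lexer::{is_id_continue, is_id_start};
///
/// assert!(is_id_continue('1'));
/// assert!(!is_id_start('1'));
/// assert!(!is_id_continue('-'));
/// ```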
pub fn is_id_continue(c: char) -> bool {
    // This is exactly XID_Continue.
    // We also add a fast path for ASCII idents.
    ('a' <= c && c <= 'z')
        || ('A' <= c && c <= 'Z')
        || ('0' <= c && c <= '9')
        || c == '_'
        || (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}

impl Cursor<'_> {
    /// Parses a token from the input string.
    fn advance_token(&mut self) -> Token {
        let first_char = self.bump().unwrap();
        let token_kind = match first_char {
            // Slash, comment or block comment.
            '/' => match self.first() {
                '/' => self.line_comment(),
                '*' => self.block_comment(),
                _ => Slash,
            },

            // Whitespace sequence.
            c if is_whitespace(c) => self.whitespace(),

            // Raw identifier, raw string literal or identifier.
            'r' => match (self.first(), self.second()) {
                ('#', c1) if is_id_start(c1) => self.raw_ident(),
                ('#', _) | ('"', _) => {
                    let (n_hashes, started, terminated) = self.raw_double_quoted_string();
                    let suffix_start = self.len_consumed();
                    if terminated {
                        self.eat_literal_suffix();
                    }
                    let kind = RawStr { n_hashes, started, terminated };
                    Literal { kind, suffix_start }
                }
                _ => self.ident(),
            },

            // Byte literal, byte string literal, raw byte string literal or identifier.
            'b' => match (self.first(), self.second()) {
                ('\'', _) => {
                    self.bump();
                    let terminated = self.single_quoted_string();
                    let suffix_start = self.len_consumed();
                    if terminated {
                        self.eat_literal_suffix();
                    }
                    let kind = Byte { terminated };
                    Literal { kind, suffix_start }
                }
                ('"', _) => {
                    self.bump();
                    let terminated = self.double_quoted_string();
                    let suffix_start = self.len_consumed();
                    if terminated {
                        self.eat_literal_suffix();
                    }
                    let kind = ByteStr { terminated };
                    Literal { kind, suffix_start }
                }
                ('r', '"') | ('r', '#') => {
                    self.bump();
                    let (n_hashes, started, terminated) = self.raw_double_quoted_string();
                    let suffix_start = self.len_consumed();
                    if terminated {
                        self.eat_literal_suffix();
                    }
                    let kind = RawByteStr { n_hashes, started, terminated };
                    Literal { kind, suffix_start }
                }
                _ => self.ident(),
            },

            // Identifier (this should be checked after other variants that can
            // start as identifiers).
            c if is_id_start(c) => self.ident(),

            // Numeric literal.
            c @ '0'..='9' => {
                let literal_kind = self.number(c);
                let suffix_start = self.len_consumed();
                self.eat_literal_suffix();
                TokenKind::Literal { kind: literal_kind, suffix_start }
            }

            // One-symbol tokens.
            ';' => Semi,
            ',' => Comma,
            '.' => Dot,
            '(' => OpenParen,
            ')' => CloseParen,
            '{' => OpenBrace,
            '}' => CloseBrace,
            '[' => OpenBracket,
            ']' => CloseBracket,
            '@' => At,
            '#' => Pound,
            '~' => Tilde,
            '?' => Question,
            ':' => Colon,
            '$' => Dollar,
            '=' => Eq,
            '!' => Not,
            '<' => Lt,
            '>' => Gt,
            '-' => Minus,
            '&' => And,
            '|' => Or,
            '+' => Plus,
            '*' => Star,
            '^' => Caret,
            '%' => Percent,

            // Lifetime or character literal.
            '\'' => self.lifetime_or_char(),

            // String literal.
            '"' => {
                let terminated = self.double_quoted_string();
                let suffix_start = self.len_consumed();
                if terminated {
                    self.eat_literal_suffix();
                }
                let kind = Str { terminated };
                Literal { kind, suffix_start }
            }
            _ => Unknown,
        };
        Token::new(token_kind, self.len_consumed())
    }

    fn line_comment(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '/' && self.first() == '/');
        self.bump();
        self.eat_while(|c| c != '\n');
        LineComment
    }

    fn block_comment(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '/' && self.first() == '*');
        self.bump();
        let mut depth = 1usize;
        while let Some(c) = self.bump() {
            match c {
                '/' if self.first() == '*' => {
                    self.bump();
                    depth += 1;
                }
                '*' if self.first() == '/' => {
                    self.bump();
                    depth -= 1;
                    if depth == 0 {
                        // This block comment is closed, so for a construction like "/* */ */"
                        // there will be a successfully parsed block comment "/* */"
                        // and " */" will be processed separately.
                        break;
                    }
                }
                _ => (),
            }
        }

        BlockComment { terminated: depth == 0 }
    }

    fn whitespace(&mut self) -> TokenKind {
        debug_assert!(is_whitespace(self.prev()));
        self.eat_while(is_whitespace);
        Whitespace
    }

    fn raw_ident(&mut self) -> TokenKind {
        debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
        // Eat "#" symbol.
        self.bump();
        // Eat the identifier part of RawIdent.
        self.eat_identifier();
        RawIdent
    }

    fn ident(&mut self) -> TokenKind {
        debug_assert!(is_id_start(self.prev()));
        // Start is already eaten, eat the rest of identifier.
        self.eat_while(is_id_continue);
        Ident
    }

    fn number(&mut self, first_digit: char) -> LiteralKind {
        debug_assert!('0' <= self.prev() && self.prev() <= '9');
        let mut base = Base::Decimal;
        if first_digit == '0' {
            // Attempt to parse encoding base.
            let has_digits = match self.first() {
                'b' => {
                    base = Base::Binary;
                    self.bump();
                    self.eat_decimal_digits()
                }
                'o' => {
                    base = Base::Octal;
                    self.bump();
                    self.eat_decimal_digits()
                }
                'x' => {
                    base = Base::Hexadecimal;
                    self.bump();
                    self.eat_hexadecimal_digits()
                }
                // Not a base prefix.
                '0'..='9' | '_' | '.' | 'e' | 'E' => {
                    self.eat_decimal_digits();
                    true
                }
                // Just a 0.
                _ => return Int { base, empty_int: false },
            };
            // Base prefix was provided, but there were no digits
            // after it, e.g. "0x".
            if !has_digits {
                return Int { base, empty_int: true };
            }
        } else {
            // No base prefix, parse number in the usual way.
            self.eat_decimal_digits();
        };

        match self.first() {
            // Don't be greedy if this is actually an
            // integer literal followed by field/method access or a range pattern
            // (`0..2` and `12.foo()`)
            '.' if self.second() != '.' && !is_id_start(self.second()) => {
                // might have stuff after the ., and if it does, it needs to start
                // with a number
                self.bump();
                let mut empty_exponent = false;
                if self.first().is_digit(10) {
                    self.eat_decimal_digits();
                    match self.first() {
                        'e' | 'E' => {
                            self.bump();
                            empty_exponent = !self.eat_float_exponent();
                        }
                        _ => (),
                    }
                }
                Float { base, empty_exponent }
            }
            'e' | 'E' => {
                self.bump();
                let empty_exponent = !self.eat_float_exponent();
                Float { base, empty_exponent }
            }
            _ => Int { base, empty_int: false },
        }
    }

    fn lifetime_or_char(&mut self) -> TokenKind {
        debug_assert!(self.prev() == '\'');

        let can_be_a_lifetime = if self.second() == '\'' {
            // It's surely not a lifetime.
            false
        } else {
            // If the first symbol is valid for an identifier, it can be a lifetime.
            // Also check if it's a number for better error reporting (so '0 will
            // be reported as an invalid lifetime and not as an unterminated char literal).
            is_id_start(self.first()) || self.first().is_digit(10)
        };

        if !can_be_a_lifetime {
            let terminated = self.single_quoted_string();
            let suffix_start = self.len_consumed();
            if terminated {
                self.eat_literal_suffix();
            }
            let kind = Char { terminated };
            return Literal { kind, suffix_start };
        }

        // Either a lifetime or a character literal with
        // length greater than 1.

        let starts_with_number = self.first().is_digit(10);

        // Skip the literal contents.
        // First symbol can be a number (which isn't a valid identifier start),
        // so skip it without any checks.
        self.bump();
        self.eat_while(is_id_continue);

        // Check if after skipping literal contents we've met a closing
        // single quote (which means that the user attempted to create a
        // string with single quotes).
        if self.first() == '\'' {
            self.bump();
            let kind = Char { terminated: true };
            return Literal { kind, suffix_start: self.len_consumed() };
        }

        Lifetime { starts_with_number }
    }

    fn single_quoted_string(&mut self) -> bool {
        debug_assert!(self.prev() == '\'');
        // Check if it's a one-symbol literal.
        if self.second() == '\'' && self.first() != '\\' {
            self.bump();
            self.bump();
            return true;
        }

        // Literal has more than one symbol.

        // Parse until either quotes are terminated or error is detected.
        loop {
            match self.first() {
                // Quotes are terminated, finish parsing.
                '\'' => {
                    self.bump();
                    return true;
                }
                // Probably the beginning of a comment, which we don't want to include
                // in the error report.
                '/' => break,
                // Newline without a following '\'' means an unclosed quote, stop parsing.
                '\n' if self.second() != '\'' => break,
                // End of file, stop parsing.
                EOF_CHAR if self.is_eof() => break,
                // Escaped slash is considered one character, so bump twice.
                '\\' => {
                    self.bump();
                    self.bump();
                }
                // Skip the character.
                _ => {
                    self.bump();
                }
            }
        }
        // String was not terminated.
        false
    }

    /// Eats a double-quoted string and returns true
    /// if the string is terminated.
    fn double_quoted_string(&mut self) -> bool {
        debug_assert!(self.prev() == '"');
        while let Some(c) = self.bump() {
            match c {
                '"' => {
                    return true;
                }
                '\\' if self.first() == '\\' || self.first() == '"' => {
                    // Bump again to skip the escaped character.
                    self.bump();
                }
                _ => (),
            }
        }
        // End of file reached.
        false
    }

    /// Eats a raw double-quoted string and returns a tuple of
    /// (number of '#' symbols, whether the string started, whether it terminated).
    fn raw_double_quoted_string(&mut self) -> (usize, bool, bool) {
        debug_assert!(self.prev() == 'r');
        let mut started: bool = false;
        let mut finished: bool = false;

        // Count opening '#' symbols.
        let n_hashes = self.eat_while(|c| c == '#');

        // Check that the string has started.
        match self.bump() {
            Some('"') => started = true,
            _ => return (n_hashes, started, finished),
        }

        // Skip the string contents and, on each closing '"' met, check whether
        // the '#' symbols that follow terminate the raw string.
        while !finished {
            self.eat_while(|c| c != '"');

            if self.is_eof() {
                return (n_hashes, started, finished);
            }

            // Eat the closing double quote.
            self.bump();

            // Check that the number of closing '#' symbols
            // is equal to the number of opening ones.
            let mut hashes_left = n_hashes;
            let is_closing_hash = |c| {
                if c == '#' && hashes_left != 0 {
                    hashes_left -= 1;
                    true
                } else {
                    false
                }
            };
            finished = self.eat_while(is_closing_hash) == n_hashes;
        }

        (n_hashes, started, finished)
    }

    fn eat_decimal_digits(&mut self) -> bool {
        let mut has_digits = false;
        loop {
            match self.first() {
                '_' => {
                    self.bump();
                }
                '0'..='9' => {
                    has_digits = true;
                    self.bump();
                }
                _ => break,
            }
        }
        has_digits
    }

    fn eat_hexadecimal_digits(&mut self) -> bool {
        let mut has_digits = false;
        loop {
            match self.first() {
                '_' => {
                    self.bump();
                }
                '0'..='9' | 'a'..='f' | 'A'..='F' => {
                    has_digits = true;
                    self.bump();
                }
                _ => break,
            }
        }
        has_digits
    }

    /// Eats the float exponent. Returns true if at least one digit was met,
    /// and returns false otherwise.
    fn eat_float_exponent(&mut self) -> bool {
        debug_assert!(self.prev() == 'e' || self.prev() == 'E');
        if self.first() == '-' || self.first() == '+' {
            self.bump();
        }
        self.eat_decimal_digits()
    }

    // Eats the suffix of the literal, e.g. "_u8".
    fn eat_literal_suffix(&mut self) {
        self.eat_identifier();
    }

    // Eats the identifier.
    fn eat_identifier(&mut self) {
        if !is_id_start(self.first()) {
            return;
        }
        self.bump();

        self.eat_while(is_id_continue);
    }

    /// Eats symbols while the predicate returns true or until the end of file is reached.
    /// Returns the number of eaten symbols.
    fn eat_while<F>(&mut self, mut predicate: F) -> usize
    where
        F: FnMut(char) -> bool,
    {
        let mut eaten: usize = 0;
        while predicate(self.first()) && !self.is_eof() {
            eaten += 1;
            self.bump();
        }

        eaten
    }
}