//! # Lexer module
//!
//! The lexer module is responsible for tokenising input strings. It supports various
//! token types such as identifiers, numbers, strings, and operators, and uses a
//! cursor-based approach to iterate over the input string and extract tokens.
//!
//! The lexer is implemented as the `Lexer` struct, which holds an iterator over the
//! characters of the input string and uses it to extract tokens one at a time.
//!
//! The `Lexer` struct provides a `next_token` method, which advances the lexer to the
//! next token in the input stream and returns it. This method is essentially one large
//! match expression, with arms corresponding to every token type. It skips any
//! whitespace and comments before identifying the next token.
//!
//! The token is represented by a `Token` struct, which contains information about its kind
//! (e.g., identifier, operator, literal) and its span in the input stream.
//!
//! The lexer module is used by the parser to tokenise the input string before parsing it
//! into an abstract syntax tree (AST).
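//!
//! # Example
//!
//! A minimal usage sketch (the crate path `lexer` is assumed here and may differ in
//! your build; marked `ignore` rather than run as a verified doctest):
//!
//! ```ignore
//! use lexer::{Lexer, token::TokenKind};
//!
//! let mut lexer = Lexer::new("1 + 2");
//! // Yields Int("1"), Plus, Int("2"), then an endless stream of Eof tokens.
//! loop {
//!     let token = lexer.next_token();
//!     if matches!(token.kind, TokenKind::Eof) {
//!         break;
//!     }
//!     println!("token at {}..{}", token.span.start, token.span.end);
//! }
//! ```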
use std::str::Chars;

use shared::span::Span;
use token::{Token, TokenKind};

/// The version of the lexer package as reported by Cargo at compile time, or `None`
/// if the `CARGO_PKG_VERSION` environment variable was not set when building.
pub const VERSION: Option<&str> = std::option_env!("CARGO_PKG_VERSION");

#[cfg(test)]
mod test;

pub mod token;

/// Lexer for tokenising input strings.
///
/// The `Lexer` provides methods for tokenising input strings into individual tokens.
/// It supports various token types such as identifiers, numbers, strings, and operators.
/// The `Lexer` uses a cursor-based approach to iterate over the input string and extract
/// tokens.
pub struct Lexer<'lexer> {
    /// Represents the input for the lexer.
    ///
    /// The `input` field is of type `Chars<'lexer>`, an iterator over the characters
    /// of a string. Using `Chars` instead of a raw string allows iteration one
    /// character at a time, with correct handling of Unicode, including multi-byte
    /// characters.
    input: Chars<'lexer>,
    /// The character currently under the cursor, or `'\0'` once the end of the
    /// input has been reached.
    chr: char,
    /// The cursor position, measured in characters (not bytes) from the start of
    /// the input.
    position: usize,
}

impl<'lexer> Lexer<'lexer> {
    /// Creates a new Lexer instance.
    ///
    /// # Arguments
    ///
    /// * `input` - The input string to be tokenised.
    pub fn new(input: &'lexer str) -> Self {
        let mut lexer = Lexer {
            input: input.chars(),
            chr: char::from(0),
            position: 0,
        };
        lexer.read_char();
        // We set position to 0 here because `read_char()` increments position to 1,
        // but we want to start the index at 0 for consistency.
        lexer.position = 0;
        lexer
    }

    /// Reads the next character from the input stream and updates the lexer's internal
    /// state.
    fn read_char(&mut self) {
        match self.input.next() {
            Some(chr) => {
                self.chr = chr;
                self.position += 1;
            }
            None => {
                // '\0' indicates the end of the file.
                // If we are already at the end of the file, there is no need to update the
                // character or increment the position.
                if self.chr != '\0' {
                    self.chr = '\0';
                    self.position += 1;
                }
            }
        }
    }

    /// Returns the next character in the input stream without consuming it.
    ///
    /// # Returns
    ///
    /// The next character in the input stream, or `'\0'` if the end of the stream has
    /// been reached.
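    ///
    /// # Example
    ///
    /// A sketch of peek-versus-read behaviour (`ignore`d, since this is a private
    /// method and cannot run as a doctest):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("ab");
    /// assert_eq!(lexer.chr, 'a');
    /// assert_eq!(lexer.peek_char(), 'b'); // peeking does not advance the cursor
    /// assert_eq!(lexer.chr, 'a');
    /// lexer.read_char();
    /// assert_eq!(lexer.chr, 'b');
    /// ```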
    fn peek_char(&mut self) -> char {
        // Clone the iterator to look ahead without advancing it. Cloning `Chars` is
        // cheap: it copies only the iterator's cursor, not the underlying string.
        self.input.clone().next().unwrap_or('\0')
    }

    /// Advances the lexer to the next token in the input stream and returns the token.
    ///
    /// This function is essentially one large match expression, with arms
    /// corresponding to every token type. It skips any whitespace and comments
    /// before identifying the next token. The token is represented by a `Token`
    /// struct, which records its kind (e.g., identifier, operator, literal) and its
    /// span in the input stream.
    ///
    /// # Returns
    ///
    /// The next token in the sequence. Once the end of the input is reached, every
    /// subsequent call returns an `Eof` token.
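    ///
    /// # Example
    ///
    /// A sketch of the expected token stream (`ignore`d rather than run as a
    /// doctest; the `TokenKind` variants are those used elsewhere in this file):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("1 + 2");
    /// assert!(matches!(lexer.next_token().kind, TokenKind::Int(_))); // "1", span 0..1
    /// assert!(matches!(lexer.next_token().kind, TokenKind::Plus));   // "+", span 2..3
    /// assert!(matches!(lexer.next_token().kind, TokenKind::Int(_))); // "2", span 4..5
    /// assert!(matches!(lexer.next_token().kind, TokenKind::Eof));    // end, span 5..5
    /// assert!(matches!(lexer.next_token().kind, TokenKind::Eof));    // still Eof
    /// ```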
    pub fn next_token(&mut self) -> Token {
        let start_position = self.position;

        // Skip over any whitespace, comments, and newlines.
        match self.skip_garbage() {
            Ok(encountered_newline) => {
                // If we encountered a newline character (`\n`), we return a NewLine token.
                if encountered_newline {
                    return Token {
                        span: Span::new(self.position - 1, self.position),
                        kind: TokenKind::NewLine,
                    };
                }
            }
            // The only error `skip_garbage` can return is an unterminated multi-line
            // comment, so we can ignore the error value and return the corresponding
            // token.
            Err(_) => {
                return Token {
                    span: Span::new(start_position, self.position),
                    kind: TokenKind::UnterminatedComment,
                };
            }
        }

        let start_position = self.position;

        // Determine what type of token we are dealing with.
        let token_kind = match self.chr {
            // Single character symbols
            '+' => TokenKind::Plus,
            '-' => TokenKind::Minus,
            '*' => TokenKind::Mult,
            '/' => TokenKind::Div,
            '%' => TokenKind::Mod,

            ',' => TokenKind::Comma,
            ';' => TokenKind::Semicolon,
            ':' => TokenKind::Colon,

            '(' => TokenKind::LParen,
            ')' => TokenKind::RParen,
            '{' => TokenKind::LCurly,
            '}' => TokenKind::RCurly,
            '[' => TokenKind::LBracket,
            ']' => TokenKind::RBracket,

            '\0' => TokenKind::Eof,
            // Note: `skip_garbage` already consumes newlines, so this arm is only a
            // defensive fallback.
            '\n' => TokenKind::NewLine,

            // Potentially double character symbols
            '=' => {
                if self.peek_char() == '=' {
                    self.read_char();
                    TokenKind::Eq
                } else {
                    TokenKind::Assign
                }
            }
            '!' => {
                if self.peek_char() == '=' {
                    self.read_char();
                    TokenKind::NotEq
                } else {
                    TokenKind::Illegal(self.chr.to_string())
                }
            }
            '<' => {
                if self.peek_char() == '=' {
                    self.read_char();
                    TokenKind::LtEq
                } else {
                    TokenKind::Lt
                }
            }
            '>' => {
                if self.peek_char() == '=' {
                    self.read_char();
                    TokenKind::GtEq
                } else {
                    TokenKind::Gt
                }
            }

            // The following arms return immediately, because these tokens have no
            // fixed length and must therefore be handled separately.

            // String literals
            '"' => {
                match self.read_string() {
                    Ok(string) => {
                        return Token {
                            span: Span {
                                start: start_position,
                                end: self.position,
                            },
                            kind: TokenKind::String(string),
                        }
                    }
                    // An error represents an unterminated string.
                    Err(_) => {
                        return Token {
                            span: Span {
                                start: start_position,
                                end: self.position,
                            },
                            kind: TokenKind::UnterminatedString,
                        }
                    }
                };
            }

            // Otherwise, we are dealing with a keyword, identifier, number, or an
            // illegal character.
            _ => {
                if is_valid_ident_start(self.chr) {
                    let ident = self.read_identifier();
                    return Token {
                        span: Span {
                            start: start_position,
                            end: self.position,
                        },
                        kind: TokenKind::lookup_ident(&ident),
                    };
                } else if is_digit(self.chr) {
                    return self.read_number();
                } else {
                    TokenKind::Illegal(self.chr.to_string())
                }
            }
        };

        // Every arm above leaves the lexer positioned on the final character of its
        // token, so we advance past it once here rather than in every arm.
        self.read_char();

        Token {
            span: Span {
                start: start_position,
                end: self.position,
            },
            kind: token_kind,
        }
    }

    /// Reads a string literal from the current position, consuming both the opening
    /// and closing quotes.
    ///
    /// # Returns
    ///
    /// An `Ok(String)` containing the contents of the string literal (without the
    /// quotes), or an `Err(())` if the string was not terminated.
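    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("\"hi\" rest");
    /// assert_eq!(lexer.read_string(), Ok("hi".to_string()));
    /// assert_eq!(lexer.chr, ' '); // the cursor is past the closing quote
    ///
    /// let mut lexer = Lexer::new("\"oops");
    /// assert_eq!(lexer.read_string(), Err(())); // unterminated string
    /// ```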
    fn read_string(&mut self) -> Result<String, ()> {
        let mut string = String::new();
        // Read opening '"'
        self.read_char();

        // Read string contents
        while self.chr != '"' {
            if self.chr == '\0' {
                return Err(());
            }
            string.push(self.chr);
            self.read_char();
        }

        // Read closing '"'
        self.read_char();

        Ok(string)
    }

    /// Reads an identifier starting from the current character position.
    ///
    /// # Returns
    ///
    /// A `String` representing the identifier extracted from the input.
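    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("foo_1 bar");
    /// assert_eq!(lexer.read_identifier(), "foo_1");
    /// assert_eq!(lexer.chr, ' '); // the cursor stops at the first non-identifier char
    /// ```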
    fn read_identifier(&mut self) -> String {
        let mut ident = String::new();

        while is_valid_ident_continue(self.chr) {
            ident.push(self.chr);
            self.read_char();
        }
        ident
    }

    /// Reads a number from the current position in the input and constructs a `Token`.
    ///
    /// This function reads a sequence of digits as an integer. If a decimal point is
    /// encountered, it continues to read the fractional part, constructing a
    /// floating-point number.
    ///
    /// # Returns
    ///
    /// A `Token` representing either an integer or a floating-point number, depending on
    /// the input.
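    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("3.14");
    /// let token = lexer.read_number();
    /// // token.kind is TokenKind::Float("3.14".to_string()), spanning 0..4
    ///
    /// let mut lexer = Lexer::new("42");
    /// // lexer.read_number().kind is TokenKind::Int("42".to_string())
    /// ```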
    fn read_number(&mut self) -> Token {
        let mut number = String::new();
        self._read_int(&mut number);

        // If we encounter a decimal point, continue reading the fractional part and
        // produce a float; otherwise produce an integer.
        let is_float = if self.chr == '.' {
            number.push(self.chr);
            self.read_char();
            self._read_int(&mut number);
            true
        } else {
            false
        };

        // The digits (and any '.') are all ASCII, so the string length equals the
        // number of characters consumed.
        let span = Span {
            start: self.position - number.len(),
            end: self.position,
        };
        let kind = if is_float {
            TokenKind::Float(number)
        } else {
            TokenKind::Int(number)
        };

        Token { span, kind }
    }

    /// Reads and appends digits to a given string from the current position in the input.
    ///
    /// # Arguments
    ///
    /// * `number` - A mutable reference to a `String` where the digits are appended.
    fn _read_int(&mut self, number: &mut String) {
        while is_digit(self.chr) {
            number.push(self.chr);
            self.read_char();
        }
    }

    /// Skips over a single-line comment (`//`) in the current input.
    ///
    /// It reads characters until it reaches the end of the line or the end of the input.
    ///
    /// Assumes that the current character (`self.chr`) is the first slash.
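    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("// note\nx");
    /// lexer.skip_comment();
    /// assert_eq!(lexer.chr, 'x'); // both the comment and its newline are consumed
    /// ```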
    fn skip_comment(&mut self) {
        if self.chr == '/' && self.peek_char() == '/' {
            // Read the '//'
            self.read_char();
            self.read_char();

            // Read the comment until the end of the line, or the end of the input.
            // Checking before advancing ensures that an empty comment (`//\n`) does
            // not swallow the following line.
            while self.chr != '\n' && self.chr != '\0' {
                self.read_char();
            }
            // Consume the terminating newline, if any.
            if self.chr == '\n' {
                self.read_char();
            }
        }
    }

    /// Skips over a multi-line comment (`/* ... */`) in the current input.
    ///
    /// Assumes that the current character (`self.chr`) is the first slash.
    ///
    /// # Returns
    ///
    /// An `Ok(())` if the multi-line comment was successfully skipped, or an
    /// `Err(())` error if the comment was not terminated.
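    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("/* note */x");
    /// assert_eq!(lexer.skip_multi_comment(), Ok(()));
    /// assert_eq!(lexer.chr, 'x');
    ///
    /// let mut lexer = Lexer::new("/* oops");
    /// assert_eq!(lexer.skip_multi_comment(), Err(())); // unterminated comment
    /// ```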
    fn skip_multi_comment(&mut self) -> Result<(), ()> {
        // Consume the opening '/*'
        if self.chr == '/' && self.peek_char() == '*' {
            self.read_char();
            self.read_char();
        } else {
            return Ok(());
        }
        // Consume the comment
        while !(self.chr == '*' && self.peek_char() == '/') {
            self.read_char();
            if self.chr == '\0' {
                return Err(());
            }
        }
        // Consume the closing '*/'
        self.read_char();
        self.read_char();

        Ok(())
    }

    /// Skips over any whitespace characters, comments, and newlines in the current input.
    ///
    /// # Returns
    ///
    /// `Ok(true)` if at least one newline was encountered, `Ok(false)` otherwise, or
    /// an `Err(())` if an unterminated multi-line comment was found.
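    ///
    /// # Example
    ///
    /// A sketch of the behaviour (`ignore`d, since this is a private method):
    ///
    /// ```ignore
    /// let mut lexer = Lexer::new("  \n  x");
    /// assert_eq!(lexer.skip_garbage(), Ok(true)); // a newline was encountered
    /// assert_eq!(lexer.chr, 'x');
    ///
    /// let mut lexer = Lexer::new("/* oops");
    /// assert_eq!(lexer.skip_garbage(), Err(())); // unterminated multi-line comment
    /// ```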
    fn skip_garbage(&mut self) -> Result<bool, ()> {
        // We record whether we encountered a newline rather than counting them: the
        // lexer must report that a newline occurred, but not how many.
        //
        // Note: The parser depends on this behaviour, so don't remove it. :)
        let mut encountered_newline = false;
        while matches!(self.chr, ' ' | '\t' | '\n' | '\r' | '/') {
            match self.chr {
                // Skip whitespace
                ' ' | '\t' => self.skip_whitespace(),
                // Skip newlines
                '\n' | '\r' => {
                    encountered_newline = true;
                    self.read_char();
                }
                // Skip comments
                '/' => match self.peek_char() {
                    '/' => self.skip_comment(),
                    '*' => self.skip_multi_comment()?,
                    _ => break,
                },
                // The while statement above ensures that there can be no other pattern, but we need
                // to handle it in this match statement to satisfy the compiler.
                _ => unreachable!(),
            }
        }
        Ok(encountered_newline)
    }

    /// Skips over any whitespace characters in the current input.
    fn skip_whitespace(&mut self) {
        while matches!(self.chr, ' ' | '\t') {
            self.read_char();
        }
    }
}

/// Serves as a source of truth for the definition of what an identifier can start with.
fn is_valid_ident_start(chr: char) -> bool {
    chr.is_ascii_alphabetic() || chr == '_'
}

/// Serves as a source of truth for the definition of what an identifier can continue
/// with.
fn is_valid_ident_continue(chr: char) -> bool {
    // `is_ascii_alphanumeric` already covers digits, so no separate digit check is
    // needed.
    chr.is_ascii_alphanumeric() || chr == '_'
}

/// Serves as a source of truth for the definition of a 'digit'.
fn is_digit(chr: char) -> bool {
    chr.is_ascii_digit()
}