cypherlite_query/lexer/mod.rs

// Lexer module: logos-based tokenizer for openCypher subset

use logos::Logos;
use std::fmt;

// ---------------------------------------------------------------------------
// Span
// ---------------------------------------------------------------------------

/// Byte-offset span in the source input.
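///
/// Spans are half-open byte ranges: for the input `"MATCH"`, the resulting
/// `Match` token has `start == 0` and `end == 5` (see `lex_span_tracking`
/// in the tests below).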
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Span {
    /// Byte offset of the first character in the token.
    pub start: usize,
    /// Byte offset one past the last character in the token.
    pub end: usize,
}

// ---------------------------------------------------------------------------
// LexError
// ---------------------------------------------------------------------------

/// Error produced when the lexer encounters an unrecognized character.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LexError {
    /// Byte offset of the unrecognized character.
    pub position: usize,
    /// The unrecognized character (if available).
    pub character: Option<char>,
}

impl fmt::Display for LexError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.character {
            Some(ch) => write!(
                f,
                "unexpected character '{}' at byte offset {}",
                ch, self.position
            ),
            None => write!(f, "unexpected token at byte offset {}", self.position),
        }
    }
}

impl std::error::Error for LexError {}

// ---------------------------------------------------------------------------
// Token
// ---------------------------------------------------------------------------

/// Lexical token for the openCypher subset supported by CypherLite.
#[derive(Logos, Debug, PartialEq, Clone)]
#[logos(skip r"[ \t\r\n]+")]
pub enum Token {
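    // Disambiguation: keyword patterns carry priority 10 so they beat the
    // general identifier rule (priority 1) when both match the same slice;
    // logos still prefers the longer match, so "matching" stays a single
    // Ident. Float (priority 3) likewise outranks Integer (priority 2).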
    // -- P0 Keywords (case-insensitive) ------------------------------------
    /// `MATCH` keyword.
    #[regex("(?i)match", priority = 10)]
    Match,
    /// `RETURN` keyword.
    #[regex("(?i)return", priority = 10)]
    Return,
    /// `CREATE` keyword.
    #[regex("(?i)create", priority = 10)]
    Create,
    /// `AS` keyword (alias).
    #[regex("(?i)as", priority = 10)]
    As,
    /// `DISTINCT` keyword.
    #[regex("(?i)distinct", priority = 10)]
    Distinct,
    /// Boolean literal `true`.
    #[regex("(?i)true", priority = 10)]
    True,
    /// Boolean literal `false`.
    #[regex("(?i)false", priority = 10)]
    False,
    /// `NULL` literal.
    #[regex("(?i)null", priority = 10)]
    Null,
    /// Logical `AND` operator.
    #[regex("(?i)and", priority = 10)]
    And,
    /// Logical `OR` operator.
    #[regex("(?i)or", priority = 10)]
    Or,
    /// Logical `NOT` operator.
    #[regex("(?i)not", priority = 10)]
    Not,
    /// `IS` keyword (used in `IS NULL` / `IS NOT NULL`).
    #[regex("(?i)is", priority = 10)]
    Is,
    /// `COUNT` aggregate function keyword.
    #[regex("(?i)count", priority = 10)]
    Count,

    // -- P1 Keywords -------------------------------------------------------
    /// `WHERE` clause keyword.
    #[regex("(?i)where", priority = 10)]
    Where,
    /// `SET` clause keyword.
    #[regex("(?i)set", priority = 10)]
    Set,
    /// `REMOVE` clause keyword.
    #[regex("(?i)remove", priority = 10)]
    Remove,
    /// `DELETE` clause keyword.
    #[regex("(?i)delete", priority = 10)]
    Delete,
    /// `DETACH` modifier for `DELETE`.
    #[regex("(?i)detach", priority = 10)]
    Detach,
    /// `OPTIONAL` modifier for `MATCH`.
    #[regex("(?i)optional", priority = 10)]
    Optional,

    // -- P2 Keywords -------------------------------------------------------
    /// `MERGE` clause keyword.
    #[regex("(?i)merge", priority = 10)]
    Merge,
    /// `WITH` clause keyword.
    #[regex("(?i)with", priority = 10)]
    With,
    /// `ORDER` keyword (part of `ORDER BY`).
    #[regex("(?i)order", priority = 10)]
    Order,
    /// `BY` keyword (part of `ORDER BY`).
    #[regex("(?i)by", priority = 10)]
    By,
    /// `LIMIT` clause keyword.
    #[regex("(?i)limit", priority = 10)]
    Limit,
    /// `SKIP` clause keyword.
    #[regex("(?i)skip", priority = 10)]
    Skip,
    /// `ASC` sort direction.
    #[regex("(?i)asc", priority = 10)]
    Asc,
    /// `DESC` sort direction.
    #[regex("(?i)desc", priority = 10)]
    Desc,
    /// `ON` keyword (used in `CREATE INDEX ... ON`).
    #[regex("(?i)on", priority = 10)]
    On,

    // -- P3 Keywords -------------------------------------------------------
    /// `UNWIND` clause keyword.
    #[regex("(?i)unwind", priority = 10)]
    Unwind,
    /// `INDEX` keyword.
    #[regex("(?i)index", priority = 10)]
    Index,
    /// `DROP` keyword.
    #[regex("(?i)drop", priority = 10)]
    Drop,

    // -- P5 Keywords (Edge Indexes) ------------------------------------------
    /// `EDGE` keyword (part of `CREATE EDGE INDEX`).
    #[regex("(?i)edge", priority = 10)]
    Edge,

    // -- P6 Keywords (Subgraph/Snapshot) ------------------------------------
    /// `SNAPSHOT` keyword (part of `CREATE SNAPSHOT`).
    #[regex("(?i)snapshot", priority = 10)]
    Snapshot,
    /// `FROM` keyword (part of `CREATE SNAPSHOT ... FROM`).
    #[regex("(?i)from", priority = 10)]
    From,

    // -- P7 Keywords (Hypergraph) --------------------------------------------
    /// `HYPEREDGE` keyword for hypergraph queries.
    #[cfg(feature = "hypergraph")]
    #[regex("(?i)hyperedge", priority = 10)]
    Hyperedge,

    // -- P4 Keywords (Temporal) ---------------------------------------------
    /// `AT` keyword (part of `AT TIME`).
    #[regex("(?i)at", priority = 10)]
    At,
    /// `TIME` keyword (part of `AT TIME` / `BETWEEN TIME`).
    #[regex("(?i)time", priority = 10)]
    Time,
    /// `BETWEEN` keyword (part of `BETWEEN TIME`).
    #[regex("(?i)between", priority = 10)]
    Between,
    /// `HISTORY` keyword for version history queries.
    #[regex("(?i)history", priority = 10)]
    History,

    // -- Literals ----------------------------------------------------------
    /// Floating-point literal. Given a higher priority than `Integer` so
    /// that `3.14` lexes as a single `Float` token.
    #[regex(r"[0-9]+\.[0-9]+", lex_float, priority = 3)]
    Float(f64),

    /// Integer literal.
    #[regex(r"[0-9]+", lex_integer, priority = 2)]
    Integer(i64),

    /// Single-quoted string literal with escape sequences.
    #[regex(r"'([^'\\]|\\.)*'", lex_string)]
    StringLiteral(String),

    // -- Identifiers -------------------------------------------------------
    /// Backtick-quoted identifier.
    #[regex(r"`[^`]+`", lex_backtick_ident)]
    BacktickIdent(String),

    /// Regular identifier (lower priority than keywords).
    #[regex(r"[a-zA-Z_][a-zA-Z0-9_]*", lex_ident, priority = 1)]
    Ident(String),

    /// Parameter reference ($name).
    #[regex(r"\$[a-zA-Z_][a-zA-Z0-9_]*", lex_param)]
    Parameter(String),

    // -- Operators ---------------------------------------------------------
    /// `<>` not-equal operator.
    #[token("<>")]
    NotEqual,
    /// `!=` not-equal operator (alias).
    #[token("!=")]
    BangEqual,
    /// `<=` less-than-or-equal operator.
    #[token("<=")]
    LessEqual,
    /// `>=` greater-than-or-equal operator.
    #[token(">=")]
    GreaterEqual,
    /// `=` equality / assignment operator.
    #[token("=")]
    Eq,
    /// `<` less-than operator.
    #[token("<")]
    Less,
    /// `>` greater-than operator.
    #[token(">")]
    Greater,
    /// `+` addition operator.
    #[token("+")]
    Plus,
    /// `-` subtraction / negation operator.
    #[token("-")]
    Minus,
    /// `*` multiplication / wildcard operator.
    #[token("*")]
    Star,
    /// `/` division operator.
    #[token("/")]
    Slash,
    /// `%` modulus operator.
    #[token("%")]
    Percent,

    // -- Arrow tokens ------------------------------------------------------
    /// `->` right-directed relationship arrow.
    #[token("->")]
    ArrowRight,
    /// `<-` left-directed relationship arrow.
    #[token("<-")]
    ArrowLeft,
    /// `--` undirected relationship connector.
    #[token("--")]
    DoubleDash,

    // -- Punctuation -------------------------------------------------------
    /// `(` left parenthesis.
    #[token("(")]
    LParen,
    /// `)` right parenthesis.
    #[token(")")]
    RParen,
    /// `[` left bracket.
    #[token("[")]
    LBracket,
    /// `]` right bracket.
    #[token("]")]
    RBracket,
    /// `{` left brace.
    #[token("{")]
    LBrace,
    /// `}` right brace.
    #[token("}")]
    RBrace,
    /// `:` colon (label separator, property key separator).
    #[token(":")]
    Colon,
    /// `..` range separator for variable-length paths.
    #[token("..")]
    DoubleDot,
    /// `.` property access operator.
    #[token(".")]
    Dot,
    /// `,` comma separator.
    #[token(",")]
    Comma,
    /// `|` pipe separator (used in relationship type alternatives).
    #[token("|")]
    Pipe,
}

// ---------------------------------------------------------------------------
// Callback helpers
// ---------------------------------------------------------------------------

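// The numeric callbacks below return Option: a `None` (e.g. an integer
// overflowing i64::MAX) is reported by logos as a lex error for that token
// rather than panicking.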
fn lex_float(lex: &mut logos::Lexer<Token>) -> Option<f64> {
    lex.slice().parse::<f64>().ok()
}

fn lex_integer(lex: &mut logos::Lexer<Token>) -> Option<i64> {
    lex.slice().parse::<i64>().ok()
}

fn lex_ident(lex: &mut logos::Lexer<Token>) -> String {
    lex.slice().to_string()
}

fn lex_backtick_ident(lex: &mut logos::Lexer<Token>) -> String {
    let s = lex.slice();
    // Strip surrounding backticks
    s[1..s.len() - 1].to_string()
}

fn lex_param(lex: &mut logos::Lexer<Token>) -> String {
    // Strip leading '$'
    lex.slice()[1..].to_string()
}

/// Process a single-quoted string literal, resolving escape sequences.
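///
/// Recognized escapes are `\n`, `\t`, `\\`, and `\'`; any other escape is
/// preserved verbatim (backslash included), and a trailing lone backslash
/// is kept as-is.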
fn lex_string(lex: &mut logos::Lexer<Token>) -> String {
    let raw = lex.slice();
    // Strip surrounding quotes
    let inner = &raw[1..raw.len() - 1];
    let mut result = String::with_capacity(inner.len());
    let mut chars = inner.chars();
    while let Some(ch) = chars.next() {
        if ch == '\\' {
            match chars.next() {
                Some('n') => result.push('\n'),
                Some('t') => result.push('\t'),
                Some('\\') => result.push('\\'),
                Some('\'') => result.push('\''),
                Some(other) => {
                    result.push('\\');
                    result.push(other);
                }
                None => result.push('\\'),
            }
        } else {
            result.push(ch);
        }
    }
    result
}

// ---------------------------------------------------------------------------
// Public lex function
// ---------------------------------------------------------------------------

/// Tokenize the input string, returning a vector of (Token, Span) pairs.
///
/// Returns `Err(LexError)` on the first unrecognized character.
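///
/// # Examples
///
/// ```ignore
/// // Sketch only: the `cypherlite_query::lexer` path is assumed from this
/// // file's location and may differ in the actual crate layout.
/// use cypherlite_query::lexer::{lex, Token};
///
/// let toks = lex("RETURN 1").unwrap();
/// assert_eq!(toks[0].0, Token::Return);
/// assert_eq!(toks[1].0, Token::Integer(1));
/// ```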
pub fn lex(input: &str) -> Result<Vec<(Token, Span)>, LexError> {
    let mut lexer = Token::lexer(input);
    let mut tokens = Vec::new();

    while let Some(result) = lexer.next() {
        let span = lexer.span();
        match result {
            Ok(token) => {
                tokens.push((
                    token,
                    Span {
                        start: span.start,
                        end: span.end,
                    },
                ));
            }
            Err(()) => {
                let position = span.start;
                let character = input[position..].chars().next();
                return Err(LexError {
                    position,
                    character,
                });
            }
        }
    }

    Ok(tokens)
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    // Helper to extract just the tokens (without spans) for easier assertions.
    fn tokens(input: &str) -> Vec<Token> {
        lex(input)
            .expect("lexing should succeed")
            .into_iter()
            .map(|(tok, _)| tok)
            .collect()
    }

    // ---- LEX-T001: Empty input ------------------------------------------

    #[test]
    fn lex_t001_empty_input() {
        let result = lex("");
        assert_eq!(result.expect("should succeed"), vec![]);
    }

    #[test]
    fn lex_t001_whitespace_only() {
        let result = lex("   \t\n\r  ");
        assert_eq!(result.expect("should succeed"), vec![]);
    }

    // ---- LEX-T002: MATCH (n) RETURN n -----------------------------------

    #[test]
    fn lex_t002_match_return_basic() {
        let toks = tokens("MATCH (n) RETURN n");
        assert_eq!(
            toks,
            vec![
                Token::Match,
                Token::LParen,
                Token::Ident("n".to_string()),
                Token::RParen,
                Token::Return,
                Token::Ident("n".to_string()),
            ]
        );
    }

    // ---- LEX-T003: Case-insensitive keywords ----------------------------

    #[test]
    fn lex_t003_case_insensitive_match() {
        assert_eq!(tokens("mAtCh"), vec![Token::Match]);
    }

    #[test]
    fn lex_t003_case_insensitive_return() {
        assert_eq!(tokens("ReTuRn"), vec![Token::Return]);
    }

    #[test]
    fn lex_t003_case_insensitive_create() {
        assert_eq!(tokens("cReAtE"), vec![Token::Create]);
    }

    #[test]
    fn lex_t003_all_keywords_lowercase() {
        let kw_pairs = vec![
            ("match", Token::Match),
            ("return", Token::Return),
            ("create", Token::Create),
            ("as", Token::As),
            ("distinct", Token::Distinct),
            ("true", Token::True),
            ("false", Token::False),
            ("null", Token::Null),
            ("and", Token::And),
            ("or", Token::Or),
            ("not", Token::Not),
            ("is", Token::Is),
            ("count", Token::Count),
            ("where", Token::Where),
            ("set", Token::Set),
            ("remove", Token::Remove),
            ("delete", Token::Delete),
            ("detach", Token::Detach),
            ("optional", Token::Optional),
            ("merge", Token::Merge),
            ("with", Token::With),
            ("order", Token::Order),
            ("by", Token::By),
            ("limit", Token::Limit),
            ("skip", Token::Skip),
            ("asc", Token::Asc),
            ("desc", Token::Desc),
            ("on", Token::On),
            ("unwind", Token::Unwind),
            ("index", Token::Index),
            ("drop", Token::Drop),
            ("edge", Token::Edge),
        ];
        for (input, expected) in kw_pairs {
            assert_eq!(
                tokens(input),
                vec![expected],
                "keyword '{}' should be recognized",
                input,
            );
        }
    }

    // ---- LEX-T004: String literal with escape sequences -----------------

    #[test]
    fn lex_t004_string_with_newline_escape() {
        let toks = tokens(r"'hello\nworld'");
        assert_eq!(toks, vec![Token::StringLiteral("hello\nworld".to_string())]);
    }

    #[test]
    fn lex_t004_string_with_tab_escape() {
        let toks = tokens(r"'a\tb'");
        assert_eq!(toks, vec![Token::StringLiteral("a\tb".to_string())]);
    }

    #[test]
    fn lex_t004_string_with_backslash_escape() {
        let toks = tokens(r"'a\\b'");
        assert_eq!(toks, vec![Token::StringLiteral("a\\b".to_string())]);
    }

    #[test]
    fn lex_t004_string_with_quote_escape() {
        let toks = tokens(r"'it\'s'");
        assert_eq!(toks, vec![Token::StringLiteral("it's".to_string())]);
    }

    #[test]
    fn lex_t004_empty_string() {
        let toks = tokens("''");
        assert_eq!(toks, vec![Token::StringLiteral(String::new())]);
    }

    // ---- LEX-T005: Unrecognized character -> LexError -------------------

    #[test]
    fn lex_t005_unrecognized_at_sign() {
        let result = lex("@");
        let err = result.expect_err("should fail on '@'");
        assert_eq!(err.position, 0);
        assert_eq!(err.character, Some('@'));
    }

    #[test]
    fn lex_t005_unrecognized_after_valid_tokens() {
        let result = lex("MATCH @");
        let err = result.expect_err("should fail on '@'");
        assert_eq!(err.position, 6);
        assert_eq!(err.character, Some('@'));
    }

    // ---- LEX-T006: Integer and float literals ---------------------------

    #[test]
    fn lex_t006_integer_42() {
        let toks = tokens("42");
        assert_eq!(toks, vec![Token::Integer(42)]);
    }

    #[test]
    fn lex_t006_float_3_14() {
        let toks = tokens("3.14");
        assert_eq!(toks, vec![Token::Float(3.14)]);
    }

    #[test]
    fn lex_t006_integer_zero() {
        let toks = tokens("0");
        assert_eq!(toks, vec![Token::Integer(0)]);
    }

    #[test]
    fn lex_t006_float_zero_point_zero() {
        let toks = tokens("0.0");
        assert_eq!(toks, vec![Token::Float(0.0)]);
    }

    // ---- LEX-T007: Backtick-quoted identifiers (spaces, Unicode) --------

    #[test]
    fn lex_t007_backtick_identifier_with_space() {
        let toks = tokens("`user name`");
        assert_eq!(toks, vec![Token::BacktickIdent("user name".to_string())]);
    }

    #[test]
    fn lex_t007_backtick_korean_identifier() {
        let toks = tokens("`이름`");
        assert_eq!(toks, vec![Token::BacktickIdent("이름".to_string())]);
    }

    // ---- Operators ------------------------------------------------------

    #[test]
    fn lex_operators_comparison() {
        let toks = tokens("= <> != < <= > >=");
        assert_eq!(
            toks,
            vec![
                Token::Eq,
                Token::NotEqual,
                Token::BangEqual,
                Token::Less,
                Token::LessEqual,
                Token::Greater,
                Token::GreaterEqual,
            ]
        );
    }

    #[test]
    fn lex_operators_arithmetic() {
        let toks = tokens("+ - * / %");
        assert_eq!(
            toks,
            vec![
                Token::Plus,
                Token::Minus,
                Token::Star,
                Token::Slash,
                Token::Percent,
            ]
        );
    }

    // ---- Punctuation ----------------------------------------------------

    #[test]
    fn lex_punctuation() {
        let toks = tokens("( ) [ ] { } : . , |");
        assert_eq!(
            toks,
            vec![
                Token::LParen,
                Token::RParen,
                Token::LBracket,
                Token::RBracket,
                Token::LBrace,
                Token::RBrace,
                Token::Colon,
                Token::Dot,
                Token::Comma,
                Token::Pipe,
            ]
        );
    }

    // ---- TASK-102: DoubleDot token ----------------------------------------

    #[test]
    fn lex_double_dot() {
        let toks = tokens("..");
        assert_eq!(toks, vec![Token::DoubleDot]);
    }

    #[test]
    fn lex_double_dot_in_var_length() {
        let toks = tokens("*1..3");
        assert_eq!(
            toks,
            vec![
                Token::Star,
                Token::Integer(1),
                Token::DoubleDot,
                Token::Integer(3),
            ]
        );
    }

    #[test]
    fn lex_single_dot_still_works() {
        let toks = tokens("n.name");
        assert_eq!(
            toks,
            vec![
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("name".to_string()),
            ]
        );
    }

    // ---- Arrow tokens ---------------------------------------------------

    #[test]
    fn lex_arrow_right() {
        let toks = tokens("->");
        assert_eq!(toks, vec![Token::ArrowRight]);
    }

    #[test]
    fn lex_arrow_left() {
        let toks = tokens("<-");
        assert_eq!(toks, vec![Token::ArrowLeft]);
    }

    #[test]
    fn lex_double_dash() {
        let toks = tokens("--");
        assert_eq!(toks, vec![Token::DoubleDash]);
    }

    // ---- Parameter tokens -----------------------------------------------

    #[test]
    fn lex_parameter() {
        let toks = tokens("$name");
        assert_eq!(toks, vec![Token::Parameter("name".to_string())]);
    }

    #[test]
    fn lex_parameter_with_underscore() {
        let toks = tokens("$user_id");
        assert_eq!(toks, vec![Token::Parameter("user_id".to_string())]);
    }

    // ---- Identifier vs keyword boundary ---------------------------------

    #[test]
    fn lex_identifier_starting_with_keyword_prefix() {
        // "matching" should be an identifier, not MATCH + "ing"
        let toks = tokens("matching");
        assert_eq!(toks, vec![Token::Ident("matching".to_string())]);
    }

    #[test]
    fn lex_identifier_returns() {
        // "returns" should be an identifier, not RETURN + "s"
        let toks = tokens("returns");
        assert_eq!(toks, vec![Token::Ident("returns".to_string())]);
    }

    #[test]
    fn lex_identifier_underscore_prefix() {
        let toks = tokens("_private");
        assert_eq!(toks, vec![Token::Ident("_private".to_string())]);
    }

    // ---- Span tracking --------------------------------------------------

    #[test]
    fn lex_span_tracking() {
        let result = lex("MATCH (n)").expect("should succeed");
        assert_eq!(result[0].1, Span { start: 0, end: 5 }); // MATCH
        assert_eq!(result[1].1, Span { start: 6, end: 7 }); // (
        assert_eq!(result[2].1, Span { start: 7, end: 8 }); // n
        assert_eq!(result[3].1, Span { start: 8, end: 9 }); // )
    }

    // ---- Complex queries ------------------------------------------------

    #[test]
    fn lex_relationship_pattern() {
        let toks = tokens("(a)-[:KNOWS]->(b)");
        assert_eq!(
            toks,
            vec![
                Token::LParen,
                Token::Ident("a".to_string()),
                Token::RParen,
                Token::Minus,
                Token::LBracket,
                Token::Colon,
                Token::Ident("KNOWS".to_string()),
                Token::RBracket,
                Token::ArrowRight,
                Token::LParen,
                Token::Ident("b".to_string()),
                Token::RParen,
            ]
        );
    }

    #[test]
    fn lex_where_clause() {
        let toks = tokens("WHERE n.age >= 18 AND n.name <> 'unknown'");
        assert_eq!(
            toks,
            vec![
                Token::Where,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("age".to_string()),
                Token::GreaterEqual,
                Token::Integer(18),
                Token::And,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("name".to_string()),
                Token::NotEqual,
                Token::StringLiteral("unknown".to_string()),
            ]
        );
    }

    #[test]
    fn lex_return_with_alias() {
        let toks = tokens("RETURN n.name AS username, COUNT(DISTINCT n)");
        assert_eq!(
            toks,
            vec![
                Token::Return,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("name".to_string()),
                Token::As,
                Token::Ident("username".to_string()),
                Token::Comma,
                Token::Count,
                Token::LParen,
                Token::Distinct,
                Token::Ident("n".to_string()),
                Token::RParen,
            ]
        );
    }

    #[test]
    fn lex_order_by_limit_skip() {
        let toks = tokens("ORDER BY n.age DESC SKIP 10 LIMIT 5");
        assert_eq!(
            toks,
            vec![
                Token::Order,
                Token::By,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("age".to_string()),
                Token::Desc,
                Token::Skip,
                Token::Integer(10),
                Token::Limit,
                Token::Integer(5),
            ]
        );
    }

    #[test]
    fn lex_create_with_properties() {
        let toks = tokens("CREATE (n:Person {name: 'Alice', age: 30})");
        assert_eq!(
            toks,
            vec![
                Token::Create,
                Token::LParen,
                Token::Ident("n".to_string()),
                Token::Colon,
                Token::Ident("Person".to_string()),
                Token::LBrace,
                Token::Ident("name".to_string()),
                Token::Colon,
                Token::StringLiteral("Alice".to_string()),
                Token::Comma,
                Token::Ident("age".to_string()),
                Token::Colon,
                Token::Integer(30),
                Token::RBrace,
                Token::RParen,
            ]
        );
    }

    #[test]
    fn lex_error_display() {
        let err = LexError {
            position: 5,
            character: Some('@'),
        };
        assert_eq!(err.to_string(), "unexpected character '@' at byte offset 5");
    }

    #[test]
    fn lex_error_display_no_char() {
        let err = LexError {
            position: 5,
            character: None,
        };
        assert_eq!(err.to_string(), "unexpected token at byte offset 5");
    }

    #[test]
    fn lex_boolean_literals_in_context() {
        let toks = tokens("true AND false OR NOT null IS null");
        assert_eq!(
            toks,
            vec![
                Token::True,
                Token::And,
                Token::False,
                Token::Or,
                Token::Not,
                Token::Null,
                Token::Is,
                Token::Null,
            ]
        );
    }

    #[test]
    fn lex_optional_match() {
        let toks = tokens("OPTIONAL MATCH");
        assert_eq!(toks, vec![Token::Optional, Token::Match]);
    }

    #[test]
    fn lex_detach_delete() {
        let toks = tokens("DETACH DELETE n");
        assert_eq!(
            toks,
            vec![Token::Detach, Token::Delete, Token::Ident("n".to_string())]
        );
    }

    #[test]
    fn lex_merge_on() {
        let toks = tokens("MERGE (n) ON MATCH SET n.x = 1");
        assert_eq!(
            toks,
            vec![
                Token::Merge,
                Token::LParen,
                Token::Ident("n".to_string()),
                Token::RParen,
                Token::On,
                Token::Match,
                Token::Set,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("x".to_string()),
                Token::Eq,
                Token::Integer(1),
            ]
        );
    }

    #[test]
    fn lex_with_clause() {
        let toks = tokens("WITH n, m");
        assert_eq!(
            toks,
            vec![
                Token::With,
                Token::Ident("n".to_string()),
                Token::Comma,
                Token::Ident("m".to_string()),
            ]
        );
    }

    #[test]
    fn lex_remove_keyword() {
        let toks = tokens("REMOVE n.prop");
        assert_eq!(
            toks,
            vec![
                Token::Remove,
                Token::Ident("n".to_string()),
                Token::Dot,
                Token::Ident("prop".to_string()),
            ]
        );
    }

    #[test]
    fn lex_left_arrow_relationship() {
        let toks = tokens("(a)<-[:KNOWS]-(b)");
        assert_eq!(
            toks,
            vec![
                Token::LParen,
                Token::Ident("a".to_string()),
                Token::RParen,
                Token::ArrowLeft,
                Token::LBracket,
                Token::Colon,
                Token::Ident("KNOWS".to_string()),
                Token::RBracket,
                Token::Minus,
                Token::LParen,
                Token::Ident("b".to_string()),
                Token::RParen,
            ]
        );
    }

    #[test]
    fn lex_undirected_relationship() {
        let toks = tokens("(a)--(b)");
        assert_eq!(
            toks,
            vec![
                Token::LParen,
                Token::Ident("a".to_string()),
                Token::RParen,
                Token::DoubleDash,
                Token::LParen,
                Token::Ident("b".to_string()),
                Token::RParen,
            ]
        );
    }

    // ---- TASK-066: UNWIND keyword token ---------------------------------

    #[test]
    fn lex_t066_unwind_keyword() {
        let toks = tokens("UNWIND");
        assert_eq!(toks, vec![Token::Unwind]);
    }

    #[test]
    fn lex_t066_unwind_case_insensitive() {
        assert_eq!(tokens("unwind"), vec![Token::Unwind]);
        assert_eq!(tokens("Unwind"), vec![Token::Unwind]);
        assert_eq!(tokens("uNwInD"), vec![Token::Unwind]);
    }

    #[test]
    fn lex_t066_unwind_not_prefix_of_identifier() {
        // "unwinding" should be an identifier, not UNWIND + "ing"
        let toks = tokens("unwinding");
        assert_eq!(toks, vec![Token::Ident("unwinding".to_string())]);
    }

    #[test]
    fn lex_t066_unwind_in_context() {
        let toks = tokens("UNWIND [1, 2, 3] AS x");
        assert_eq!(
            toks,
            vec![
                Token::Unwind,
                Token::LBracket,
                Token::Integer(1),
                Token::Comma,
                Token::Integer(2),
                Token::Comma,
                Token::Integer(3),
                Token::RBracket,
                Token::As,
                Token::Ident("x".to_string()),
            ]
        );
    }

    // ---- TASK-098: INDEX and DROP keyword tokens --------------------------

    #[test]
    fn lex_t098_index_keyword() {
        let toks = tokens("INDEX");
        assert_eq!(toks, vec![Token::Index]);
    }

    #[test]
    fn lex_t098_index_case_insensitive() {
        assert_eq!(tokens("index"), vec![Token::Index]);
        assert_eq!(tokens("Index"), vec![Token::Index]);
    }

    #[test]
    fn lex_t098_drop_keyword() {
        let toks = tokens("DROP");
        assert_eq!(toks, vec![Token::Drop]);
    }

    #[test]
    fn lex_t098_drop_case_insensitive() {
        assert_eq!(tokens("drop"), vec![Token::Drop]);
        assert_eq!(tokens("Drop"), vec![Token::Drop]);
    }

    #[test]
    fn lex_t098_create_index_on() {
        let toks = tokens("CREATE INDEX idx_name ON :Person(name)");
        assert_eq!(
            toks,
            vec![
                Token::Create,
                Token::Index,
                Token::Ident("idx_name".to_string()),
                Token::On,
                Token::Colon,
                Token::Ident("Person".to_string()),
                Token::LParen,
                Token::Ident("name".to_string()),
                Token::RParen,
            ]
        );
    }

    #[test]
    fn lex_t098_drop_index() {
        let toks = tokens("DROP INDEX idx_name");
        assert_eq!(
            toks,
            vec![
                Token::Drop,
                Token::Index,
                Token::Ident("idx_name".to_string()),
            ]
        );
    }

    // ---- X-T1: AT, TIME, BETWEEN, HISTORY keyword tokens ----------------

    #[test]
    fn lex_xt1_at_keyword() {
        let toks = tokens("AT");
        assert_eq!(toks, vec![Token::At]);
    }

    #[test]
    fn lex_xt1_at_case_insensitive() {
        assert_eq!(tokens("at"), vec![Token::At]);
        assert_eq!(tokens("At"), vec![Token::At]);
    }

    #[test]
    fn lex_xt1_time_keyword() {
        let toks = tokens("TIME");
        assert_eq!(toks, vec![Token::Time]);
    }

    #[test]
    fn lex_xt1_time_case_insensitive() {
        assert_eq!(tokens("time"), vec![Token::Time]);
        assert_eq!(tokens("Time"), vec![Token::Time]);
    }

    #[test]
    fn lex_xt1_between_keyword() {
        let toks = tokens("BETWEEN");
        assert_eq!(toks, vec![Token::Between]);
    }

    #[test]
    fn lex_xt1_between_case_insensitive() {
        assert_eq!(tokens("between"), vec![Token::Between]);
        assert_eq!(tokens("Between"), vec![Token::Between]);
    }

    #[test]
    fn lex_xt1_history_keyword() {
        let toks = tokens("HISTORY");
        assert_eq!(toks, vec![Token::History]);
    }

    #[test]
    fn lex_xt1_history_case_insensitive() {
        assert_eq!(tokens("history"), vec![Token::History]);
        assert_eq!(tokens("History"), vec![Token::History]);
    }

    #[test]
    fn lex_xt1_at_time_in_context() {
        let toks = tokens("MATCH (n:Person) AT TIME 1000 RETURN n");
        assert_eq!(
            toks,
            vec![
                Token::Match,
                Token::LParen,
                Token::Ident("n".to_string()),
                Token::Colon,
                Token::Ident("Person".to_string()),
                Token::RParen,
                Token::At,
                Token::Time,
                Token::Integer(1000),
                Token::Return,
                Token::Ident("n".to_string()),
            ]
        );
    }

    #[test]
    fn lex_xt1_between_time_in_context() {
        let toks = tokens("BETWEEN TIME 100 AND 200");
        assert_eq!(
            toks,
            vec![
                Token::Between,
                Token::Time,
                Token::Integer(100),
                Token::And,
                Token::Integer(200),
            ]
        );
    }

    #[test]
    fn lex_xt1_at_not_prefix_of_identifier() {
        let toks = tokens("atlas");
        assert_eq!(toks, vec![Token::Ident("atlas".to_string())]);
    }

    #[test]
    fn lex_arithmetic_expression() {
        let toks = tokens("1 + 2 * 3 - 4 / 5 % 6");
        assert_eq!(
            toks,
            vec![
                Token::Integer(1),
                Token::Plus,
                Token::Integer(2),
                Token::Star,
                Token::Integer(3),
                Token::Minus,
                Token::Integer(4),
                Token::Slash,
                Token::Integer(5),
                Token::Percent,
                Token::Integer(6),
            ]
        );
    }

    // ======================================================================
    // HH-001: SNAPSHOT and FROM keyword tokens
    // ======================================================================

    #[test]
    fn lex_hh1_snapshot_keyword() {
        let toks = tokens("SNAPSHOT");
        assert_eq!(toks, vec![Token::Snapshot]);
    }

    #[test]
    fn lex_hh1_snapshot_case_insensitive() {
        assert_eq!(tokens("snapshot"), vec![Token::Snapshot]);
        assert_eq!(tokens("Snapshot"), vec![Token::Snapshot]);
        assert_eq!(tokens("sNaPsHoT"), vec![Token::Snapshot]);
    }

    #[test]
    fn lex_hh1_snapshot_not_prefix_of_identifier() {
        let toks = tokens("snapshots");
        assert_eq!(toks, vec![Token::Ident("snapshots".to_string())]);
    }

    #[test]
    fn lex_hh1_from_keyword() {
        let toks = tokens("FROM");
        assert_eq!(toks, vec![Token::From]);
    }

    #[test]
    fn lex_hh1_from_case_insensitive() {
        assert_eq!(tokens("from"), vec![Token::From]);
        assert_eq!(tokens("From"), vec![Token::From]);
        assert_eq!(tokens("fRoM"), vec![Token::From]);
    }

    #[test]
    fn lex_hh1_from_not_prefix_of_identifier() {
        let toks = tokens("fromage");
        assert_eq!(toks, vec![Token::Ident("fromage".to_string())]);
    }

    #[test]
    fn lex_hh1_create_snapshot_in_context() {
        let toks = tokens("CREATE SNAPSHOT (s:Snap) FROM MATCH (n:Person) RETURN n");
        assert_eq!(
            toks,
            vec![
                Token::Create,
                Token::Snapshot,
                Token::LParen,
                Token::Ident("s".to_string()),
                Token::Colon,
                Token::Ident("Snap".to_string()),
                Token::RParen,
                Token::From,
                Token::Match,
                Token::LParen,
                Token::Ident("n".to_string()),
                Token::Colon,
                Token::Ident("Person".to_string()),
                Token::RParen,
                Token::Return,
                Token::Ident("n".to_string()),
            ]
        );
    }

    #[test]
    fn lex_hh1_all_keywords_include_new() {
        // Verify SNAPSHOT and FROM are recognized alongside existing keywords
        let kw_pairs = vec![("snapshot", Token::Snapshot), ("from", Token::From)];
        for (input, expected) in kw_pairs {
            assert_eq!(
                tokens(input),
                vec![expected],
                "keyword '{}' should be recognized",
                input,
            );
        }
    }

    // ======================================================================
    // MM-001: HYPEREDGE keyword token (cfg-gated)
    // ======================================================================

    #[cfg(feature = "hypergraph")]
    mod hypergraph_lexer_tests {
        use super::*;

        #[test]
        fn lex_mm1_hyperedge_keyword() {
            let toks = tokens("HYPEREDGE");
            assert_eq!(toks, vec![Token::Hyperedge]);
        }

        #[test]
        fn lex_mm1_hyperedge_case_insensitive() {
            assert_eq!(tokens("hyperedge"), vec![Token::Hyperedge]);
            assert_eq!(tokens("Hyperedge"), vec![Token::Hyperedge]);
            assert_eq!(tokens("hYpErEdGe"), vec![Token::Hyperedge]);
        }

        #[test]
        fn lex_mm1_hyperedge_not_prefix_of_identifier() {
            let toks = tokens("hyperedges");
            assert_eq!(toks, vec![Token::Ident("hyperedges".to_string())]);
        }

        #[test]
        fn lex_mm1_hyperedge_in_create_context() {
            let toks = tokens("CREATE HYPEREDGE h FROM (a) TO (b)");
            assert_eq!(
                toks,
                vec![
                    Token::Create,
                    Token::Hyperedge,
                    Token::Ident("h".to_string()),
                    Token::From,
                    Token::LParen,
                    Token::Ident("a".to_string()),
                    Token::RParen,
                    Token::Ident("TO".to_string()),
                    Token::LParen,
                    Token::Ident("b".to_string()),
                    Token::RParen,
                ]
            );
        }

        #[test]
        fn lex_mm1_hyperedge_in_match_context() {
            let toks = tokens("MATCH HYPEREDGE (h:GroupMigration)");
            assert_eq!(
                toks,
                vec![
                    Token::Match,
                    Token::Hyperedge,
                    Token::LParen,
                    Token::Ident("h".to_string()),
                    Token::Colon,
                    Token::Ident("GroupMigration".to_string()),
                    Token::RParen,
                ]
            );
        }
    }
}