styx_tokenizer/tokenizer.rs

//! Tokenizer for the Styx configuration language.
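//!
//! # Example
//!
//! A minimal usage sketch, assuming `Tokenizer` and `TokenKind` are re-exported
//! at the crate root (marked `ignore` since that isn't visible from this file):
//!
//! ```ignore
//! use styx_tokenizer::{Tokenizer, TokenKind};
//!
//! // Iterate tokens until Eof (the Iterator impl stops at Eof).
//! let kinds: Vec<TokenKind> = Tokenizer::new("{host localhost}")
//!     .map(|t| t.kind)
//!     .collect();
//! assert_eq!(kinds[0], TokenKind::LBrace);
//! ```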

use crate::{Span, Token, TokenKind};
use tracing::trace;

/// A tokenizer that produces tokens from Styx source text.
#[derive(Clone)]
pub struct Tokenizer<'src> {
    /// The source text being tokenized.
    source: &'src str,
    /// The remaining source text (suffix of `source`).
    remaining: &'src str,
    /// Current byte position in `source`.
    pos: u32,

    /// State for heredoc parsing.
    heredoc_state: Option<HeredocState>,
}

/// State for tracking heredoc parsing.
#[derive(Debug, Clone)]
struct HeredocState {
    /// The delimiter to match (e.g., "EOF" for `<<EOF`).
    delimiter: String,
    /// Indentation of the closing delimiter (set when found).
    /// This is the number of spaces/tabs before the closing delimiter.
    closing_indent: Option<usize>,
}

impl<'src> Tokenizer<'src> {
    /// Create a new tokenizer for the given source text.
    pub fn new(source: &'src str) -> Self {
        Self {
            source,
            remaining: source,
            pos: 0,
            heredoc_state: None,
        }
    }

    /// Get the current byte position.
    #[inline]
    pub fn position(&self) -> u32 {
        self.pos
    }

    /// Check if we're at the end of input.
    #[inline]
    pub fn is_eof(&self) -> bool {
        self.remaining.is_empty()
    }

    /// Get the closing indent for the current heredoc (if any).
    /// This is set after parsing heredoc content, before returning HeredocEnd.
    /// Used by the lexer to apply dedent to heredoc content.
    #[inline]
    pub fn heredoc_closing_indent(&self) -> Option<usize> {
        self.heredoc_state.as_ref().and_then(|s| s.closing_indent)
    }

    /// Peek at the next character without consuming it.
    #[inline]
    fn peek(&self) -> Option<char> {
        self.remaining.chars().next()
    }

    /// Peek at the nth character (0-indexed) without consuming.
    #[inline]
    fn peek_nth(&self, n: usize) -> Option<char> {
        self.remaining.chars().nth(n)
    }

    /// Advance by one character and return it.
    #[inline]
    fn advance(&mut self) -> Option<char> {
        let c = self.peek()?;
        self.pos += c.len_utf8() as u32;
        self.remaining = &self.remaining[c.len_utf8()..];
        Some(c)
    }

    /// Advance by n bytes.
    #[inline]
    fn advance_by(&mut self, n: usize) {
        self.pos += n as u32;
        self.remaining = &self.remaining[n..];
    }

    /// Check if the remaining text starts with the given prefix.
    #[inline]
    fn starts_with(&self, prefix: &str) -> bool {
        self.remaining.starts_with(prefix)
    }

    /// Create a token from the given start position to the current position.
    fn token(&self, kind: TokenKind, start: u32) -> Token<'src> {
        let span = Span::new(start, self.pos);
        let text = &self.source[start as usize..self.pos as usize];
        trace!("Token {:?} at {:?}: {:?}", kind, span, text);
        Token::new(kind, span, text)
    }

    /// Get the next token.
    pub fn next_token(&mut self) -> Token<'src> {
        // Handle heredoc content if we're inside one
        if let Some(state) = self.heredoc_state.clone() {
            return self.tokenize_heredoc_content(&state.delimiter);
        }

        // Check for EOF
        if self.is_eof() {
            return self.token(TokenKind::Eof, self.pos);
        }

        let start = self.pos;
        let c = self.peek().unwrap();

        match c {
            // Structural tokens
            '{' => {
                self.advance();
                self.token(TokenKind::LBrace, start)
            }
            '}' => {
                self.advance();
                self.token(TokenKind::RBrace, start)
            }
            '(' => {
                self.advance();
                self.token(TokenKind::LParen, start)
            }
            ')' => {
                self.advance();
                self.token(TokenKind::RParen, start)
            }
            ',' => {
                self.advance();
                self.token(TokenKind::Comma, start)
            }
            '>' => {
                self.advance();
                self.token(TokenKind::Gt, start)
            }
            '@' => self.tokenize_at_or_tag(),

            // Quoted scalar
            '"' => self.tokenize_quoted_scalar(),

            // Comment or doc comment
            '/' if self.starts_with("///") => self.tokenize_doc_comment(),
            '/' if self.starts_with("//") => self.tokenize_line_comment(),
            // A single / is a bare scalar (e.g., /usr/bin/foo)
            '/' => self.tokenize_bare_scalar(),

            // Heredoc - only if << is followed by an uppercase letter
            // parser[impl scalar.heredoc.invalid]
            '<' if self.starts_with("<<")
                && matches!(self.peek_nth(2), Some(c) if c.is_ascii_uppercase()) =>
            {
                self.tokenize_heredoc_start()
            }
            // << not followed by an uppercase letter is an error
            '<' if self.starts_with("<<") => {
                let start = self.pos;
                self.advance(); // <
                self.advance(); // <
                self.token(TokenKind::Error, start)
            }

            // Raw string
            'r' if matches!(self.peek_nth(1), Some('#' | '"')) => self.tokenize_raw_string(),

            // Whitespace
            ' ' | '\t' => self.tokenize_whitespace(),

            // Newline
            '\n' => {
                self.advance();
                self.token(TokenKind::Newline, start)
            }
            '\r' if self.peek_nth(1) == Some('\n') => {
                self.advance();
                self.advance();
                self.token(TokenKind::Newline, start)
            }

            // Bare scalar (default for anything else that's not a special char)
            _ if is_bare_scalar_start(c) => self.tokenize_bare_scalar(),

            // Error: unrecognized character
            _ => {
                self.advance();
                self.token(TokenKind::Error, start)
            }
        }
    }

    /// Tokenize horizontal whitespace (spaces and tabs).
    fn tokenize_whitespace(&mut self) -> Token<'src> {
        let start = self.pos;
        while let Some(c) = self.peek() {
            if c == ' ' || c == '\t' {
                self.advance();
            } else {
                break;
            }
        }
        self.token(TokenKind::Whitespace, start)
    }

    /// Tokenize a bare (unquoted) scalar.
    fn tokenize_bare_scalar(&mut self) -> Token<'src> {
        let start = self.pos;
        while let Some(c) = self.peek() {
            if is_bare_scalar_char(c) {
                self.advance();
            } else {
                break;
            }
        }
        self.token(TokenKind::BareScalar, start)
    }

    /// Tokenize `@` (unit) or `@name` (tag).
    fn tokenize_at_or_tag(&mut self) -> Token<'src> {
        let start = self.pos;
        self.advance(); // consume `@`

        // Check if followed by a tag name start: [A-Za-z_]
        match self.peek() {
            Some(c) if c.is_ascii_alphabetic() || c == '_' => {
                self.consume_tag_segment();

                // Chained tags: @outer/@inner/@leaf
                while self.peek() == Some('/')
                    && self.peek_nth(1) == Some('@')
                    && matches!(self.peek_nth(2), Some(c) if c.is_ascii_alphabetic() || c == '_')
                {
                    self.advance(); // consume `/`
                    self.advance(); // consume `@`
                    self.consume_tag_segment();
                }
                self.token(TokenKind::Tag, start)
            }
            _ => {
                // Standalone @ = unit
                self.token(TokenKind::At, start)
            }
        }
    }

    /// Consume one tag name segment after the leading `@`.
    fn consume_tag_segment(&mut self) {
        // Tag name: consume [A-Za-z0-9_-]*
        // But stop before `r#` or `r"`, which starts a raw string payload.
        self.advance(); // first name char, already validated by the caller
        while let Some(c) = self.peek() {
            if c == 'r' && matches!(self.peek_nth(1), Some('#' | '"')) {
                break;
            }
            if c.is_ascii_alphanumeric() || c == '_' || c == '-' {
                self.advance();
            } else {
                break;
            }
        }
    }

    /// Tokenize a quoted scalar: `"..."`.
    fn tokenize_quoted_scalar(&mut self) -> Token<'src> {
        let start = self.pos;

        // Consume the opening quote
        self.advance();

        loop {
            match self.peek() {
                None => {
                    // Unterminated string - return an error
                    return self.token(TokenKind::Error, start);
                }
                Some('"') => {
                    self.advance();
                    break;
                }
                Some('\\') => {
                    // Escape sequence - consume the backslash and the next char
                    self.advance();
                    if self.peek().is_some() {
                        self.advance();
                    }
                }
                Some(_) => {
                    self.advance();
                }
            }
        }

        self.token(TokenKind::QuotedScalar, start)
    }

    // parser[impl comment.line]
    /// Tokenize a line comment: `// ...`.
    fn tokenize_line_comment(&mut self) -> Token<'src> {
        let start = self.pos;

        // Consume `//`
        self.advance();
        self.advance();

        // Consume until end of line
        while let Some(c) = self.peek() {
            if c == '\n' || c == '\r' {
                break;
            }
            self.advance();
        }

        self.token(TokenKind::LineComment, start)
    }

    /// Tokenize a doc comment: `/// ...`.
    fn tokenize_doc_comment(&mut self) -> Token<'src> {
        let start = self.pos;

        // Consume `///`
        self.advance();
        self.advance();
        self.advance();

        // Consume until end of line
        while let Some(c) = self.peek() {
            if c == '\n' || c == '\r' {
                break;
            }
            self.advance();
        }

        self.token(TokenKind::DocComment, start)
    }

    /// Tokenize a heredoc start: `<<DELIM`.
    ///
    /// Per parser[scalar.heredoc.syntax]: delimiter MUST match `[A-Z][A-Z0-9_]*`
    /// and not exceed 16 characters.
    // parser[impl scalar.heredoc.syntax]
    fn tokenize_heredoc_start(&mut self) -> Token<'src> {
        let start = self.pos;

        // Consume `<<`
        self.advance();
        self.advance();

        let delim_start = self.pos as usize;

        // First char MUST be an uppercase letter
        match self.peek() {
            Some(c) if c.is_ascii_uppercase() => {
                self.advance();
            }
            _ => {
                // Invalid delimiter - first char is not an uppercase letter.
                // Consume any remaining delimiter-like chars for error recovery.
                while let Some(c) = self.peek() {
                    if c.is_ascii_uppercase() || c.is_ascii_digit() || c == '_' {
                        self.advance();
                    } else {
                        break;
                    }
                }
                return self.token(TokenKind::Error, start);
            }
        }

        // Rest: uppercase, digit, or underscore
        while let Some(c) = self.peek() {
            if c.is_ascii_uppercase() || c.is_ascii_digit() || c == '_' {
                self.advance();
            } else {
                break;
            }
        }

        let delimiter = &self.source[delim_start..self.pos as usize];

        // Check length <= 16
        if delimiter.len() > 16 {
            return self.token(TokenKind::Error, start);
        }

        // Consume optional language hint: `,lang` where lang matches [a-z][a-z0-9_.-]*
        // parser[impl scalar.heredoc.lang]
        // The language hint is metadata and does not affect the scalar content.
        if self.peek() == Some(',') {
            self.advance(); // consume ','
            // First char must be a lowercase letter
            if let Some(c) = self.peek()
                && c.is_ascii_lowercase()
            {
                self.advance();
                // Rest: lowercase, digit, underscore, dot, hyphen
                while let Some(c) = self.peek() {
                    if c.is_ascii_lowercase()
                        || c.is_ascii_digit()
                        || c == '_'
                        || c == '.'
                        || c == '-'
                    {
                        self.advance();
                    } else {
                        break;
                    }
                }
            }
        }

        // Consume the newline after the delimiter (and optional lang hint)
        if self.peek() == Some('\r') {
            self.advance();
        }
        if self.peek() == Some('\n') {
            self.advance();
        }

        // Set state for heredoc content
        self.heredoc_state = Some(HeredocState {
            delimiter: delimiter.to_string(),
            closing_indent: None,
        });

        self.token(TokenKind::HeredocStart, start)
    }

    /// Check if the remaining input starts with the heredoc delimiter (possibly indented).
    /// Returns `Some(indent_len)` if found, where `indent_len` is the number of leading
    /// spaces/tabs. The delimiter must be followed by a newline or EOF to be valid.
    fn find_heredoc_delimiter(&self, delimiter: &str) -> Option<usize> {
        // Count leading whitespace
        let indent_len = self
            .remaining
            .chars()
            .take_while(|c| *c == ' ' || *c == '\t')
            .count();

        // Check if the delimiter follows the whitespace
        let after_indent = &self.remaining[indent_len..];
        if let Some(after_delim) = after_indent.strip_prefix(delimiter)
            && (after_delim.is_empty()
                || after_delim.starts_with('\n')
                || after_delim.starts_with("\r\n"))
        {
            return Some(indent_len);
        }
        None
    }

    /// Tokenize heredoc content until we find the closing delimiter.
    /// Per parser[scalar.heredoc.syntax]: the closing delimiter line MAY be indented;
    /// that indentation is stripped from content lines.
    fn tokenize_heredoc_content(&mut self, delimiter: &str) -> Token<'src> {
        let start = self.pos;

        // Check if we're at the delimiter (possibly indented) - end of heredoc
        if let Some(indent_len) = self.find_heredoc_delimiter(delimiter) {
            // This is the end delimiter - consume indent + delimiter
            self.advance_by(indent_len + delimiter.len());
            self.heredoc_state = None;
            return self.token(TokenKind::HeredocEnd, start);
        }

        // Consume content until we find the delimiter at the start of a line (possibly indented)
        let mut found_end = false;
        let mut closing_indent = 0usize;
        while !self.is_eof() {
            // Consume the current line
            while let Some(c) = self.peek() {
                if c == '\n' {
                    self.advance();
                    break;
                } else if c == '\r' && self.peek_nth(1) == Some('\n') {
                    self.advance();
                    self.advance();
                    break;
                }
                self.advance();
            }

            // Check if the next line starts with the delimiter (possibly indented)
            if let Some(indent_len) = self.find_heredoc_delimiter(delimiter) {
                found_end = true;
                closing_indent = indent_len;
                break;
            }

            if self.is_eof() {
                break;
            }
        }

        if start == self.pos
            && found_end
            && let Some(indent_len) = self.find_heredoc_delimiter(delimiter)
        {
            // No content, return the end delimiter
            self.advance_by(indent_len + delimiter.len());
            self.heredoc_state = None;
            return self.token(TokenKind::HeredocEnd, start);
        }

        // CRITICAL: If we hit EOF without finding the closing delimiter,
        // we must clear the heredoc state to avoid an infinite loop.
        // The next call would otherwise re-enter tokenize_heredoc_content forever.
        if self.is_eof() && !found_end {
            self.heredoc_state = None;
            return self.token(TokenKind::Error, start);
        }

        // Store the closing indent so the lexer can apply the dedent
        if let Some(ref mut state) = self.heredoc_state {
            state.closing_indent = Some(closing_indent);
        }

        self.token(TokenKind::HeredocContent, start)
    }

    // parser[impl scalar.raw.syntax]
    /// Tokenize a raw string: `r#*"..."#*`.
    /// Returns the entire raw string including delimiters.
    fn tokenize_raw_string(&mut self) -> Token<'src> {
        let start = self.pos;

        // Consume `r`
        self.advance();

        // Count and consume `#` marks
        let mut hash_count: u8 = 0;
        while self.peek() == Some('#') {
            hash_count = hash_count.saturating_add(1);
            self.advance();
        }

        // Consume opening `"`
        if self.peek() == Some('"') {
            self.advance();
        } else {
            // Invalid raw string - no opening quote
            return self.token(TokenKind::Error, start);
        }

        // Consume content until we find the closing `"#*`
        loop {
            match self.peek() {
                None => {
                    // Unterminated raw string - return an error
                    return self.token(TokenKind::Error, start);
                }
                Some('"') => {
                    // Check for the closing sequence
                    let mut matched_hashes = 0u8;
                    let mut lookahead = 1;
                    while matched_hashes < hash_count {
                        if self.peek_nth(lookahead) == Some('#') {
                            matched_hashes += 1;
                            lookahead += 1;
                        } else {
                            break;
                        }
                    }

                    if matched_hashes == hash_count {
                        // Found the closing delimiter - consume it
                        self.advance(); // consume `"`
                        for _ in 0..hash_count {
                            self.advance(); // consume `#`s
                        }
                        // Return a token with the full text including delimiters
                        return self.token(TokenKind::RawScalar, start);
                    } else {
                        // Not a closing delimiter, consume the `"` as content
                        self.advance();
                    }
                }
                Some(_) => {
                    self.advance();
                }
            }
        }
    }
}

impl<'src> Iterator for Tokenizer<'src> {
    type Item = Token<'src>;

    fn next(&mut self) -> Option<Self::Item> {
        let token = self.next_token();
        if token.kind == TokenKind::Eof {
            None
        } else {
            Some(token)
        }
    }
}

// parser[impl scalar.bare.chars]
/// Check if a character can start a bare scalar.
fn is_bare_scalar_start(c: char) -> bool {
    // Cannot be special chars, whitespace, or `/` (to avoid confusion with comments)
    // `=` and `@` are allowed after the first char but not at the start
    !matches!(c, '{' | '}' | '(' | ')' | ',' | '"' | '=' | '@' | '>' | '/') && !c.is_whitespace()
}

// parser[impl scalar.bare.chars]
/// Check if a character can continue a bare scalar.
fn is_bare_scalar_char(c: char) -> bool {
    // Cannot be special chars or whitespace
    // `/`, `@`, and `=` are allowed after the first char
    // `>` is never allowed (attribute separator)
    !matches!(c, '{' | '}' | '(' | ')' | ',' | '"' | '>') && !c.is_whitespace()
}

#[cfg(test)]
mod tests {
    use super::*;
    use facet_testhelpers::test;

    fn tokenize(source: &str) -> Vec<(TokenKind, &str)> {
        Tokenizer::new(source).map(|t| (t.kind, t.text)).collect()
    }

    #[test]
    fn test_structural_tokens() {
        assert_eq!(tokenize("{"), vec![(TokenKind::LBrace, "{")]);
        assert_eq!(tokenize("}"), vec![(TokenKind::RBrace, "}")]);
        assert_eq!(tokenize("("), vec![(TokenKind::LParen, "(")]);
        assert_eq!(tokenize(")"), vec![(TokenKind::RParen, ")")]);
        assert_eq!(tokenize(","), vec![(TokenKind::Comma, ",")]);
        assert_eq!(tokenize(">"), vec![(TokenKind::Gt, ">")]);
        assert_eq!(tokenize("@"), vec![(TokenKind::At, "@")]);
    }

    #[test]
    fn test_bare_scalar() {
        assert_eq!(tokenize("hello"), vec![(TokenKind::BareScalar, "hello")]);
        assert_eq!(tokenize("42"), vec![(TokenKind::BareScalar, "42")]);
        assert_eq!(tokenize("true"), vec![(TokenKind::BareScalar, "true")]);
        assert_eq!(
            tokenize("https://example.com/path"),
            vec![(TokenKind::BareScalar, "https://example.com/path")]
        );
    }
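
    #[test]
    fn test_bare_scalar_interior_chars() {
        // Illustrative sketch (invented inputs): per `is_bare_scalar_char`,
        // `=` and `@` are excluded only from the *start* of a bare scalar,
        // so they may appear in the interior.
        assert_eq!(
            tokenize("key=value"),
            vec![(TokenKind::BareScalar, "key=value")]
        );
        assert_eq!(
            tokenize("user@example.com"),
            vec![(TokenKind::BareScalar, "user@example.com")]
        );
    }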

    #[test]
    fn test_chained_tag_token() {
        assert_eq!(
            tokenize("@must_emit/@discover_start"),
            vec![(TokenKind::Tag, "@must_emit/@discover_start")]
        );
    }

    #[test]
    fn test_chained_tag_token_with_payload() {
        assert_eq!(
            tokenize("@must_emit/@discover_start{executor default}"),
            vec![
                (TokenKind::Tag, "@must_emit/@discover_start"),
                (TokenKind::LBrace, "{"),
                (TokenKind::BareScalar, "executor"),
                (TokenKind::Whitespace, " "),
                (TokenKind::BareScalar, "default"),
                (TokenKind::RBrace, "}"),
            ]
        );
    }

    #[test]
    fn test_three_segment_chained_tag_token() {
        assert_eq!(tokenize("@a/@b/@c"), vec![(TokenKind::Tag, "@a/@b/@c")]);
    }

    #[test]
    fn test_chained_tag_token_with_quoted_leaf_payload() {
        assert_eq!(
            tokenize(r#"@a/@b"foo""#),
            vec![
                (TokenKind::Tag, "@a/@b"),
                (TokenKind::QuotedScalar, r#""foo""#),
            ]
        );
    }

    #[test]
    fn test_chained_tag_token_with_raw_leaf_payload() {
        assert_eq!(
            tokenize(r##"@a/@br#"foo"#"##),
            vec![
                (TokenKind::Tag, "@a/@b"),
                (TokenKind::RawScalar, r##"r#"foo"#"##),
            ]
        );
    }

    #[test]
    fn test_chained_tag_token_with_heredoc_leaf_payload() {
        assert_eq!(
            tokenize("@a/@b<<EOF\nhello\nEOF"),
            vec![
                (TokenKind::Tag, "@a/@b"),
                (TokenKind::HeredocStart, "<<EOF\n"),
                (TokenKind::HeredocContent, "hello\n"),
                (TokenKind::HeredocEnd, "EOF"),
            ]
        );
    }

    #[test]
    fn test_quoted_scalar() {
        assert_eq!(
            tokenize(r#""hello world""#),
            vec![(TokenKind::QuotedScalar, r#""hello world""#)]
        );
        assert_eq!(
            tokenize(r#""with \"escapes\"""#),
            vec![(TokenKind::QuotedScalar, r#""with \"escapes\"""#)]
        );
    }

    #[test]
    fn test_raw_scalar() {
        // Raw scalars include the full text with delimiters (for a lossless CST)
        assert_eq!(
            tokenize(r#"r"hello""#),
            vec![(TokenKind::RawScalar, r#"r"hello""#)]
        );
        assert_eq!(
            tokenize(r##"r#"hello"#"##),
            vec![(TokenKind::RawScalar, r##"r#"hello"#"##)]
        );
    }
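
    #[test]
    fn test_raw_scalar_multiple_hashes() {
        // Illustrative sketch (invented input): with an `r##` opener, an
        // interior `"#` is content; only `"##` closes the raw string.
        assert_eq!(
            tokenize(r###"r##"a"#b"##"###),
            vec![(TokenKind::RawScalar, r###"r##"a"#b"##"###)]
        );
    }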

    #[test]
    fn test_comments() {
        assert_eq!(
            tokenize("// comment"),
            vec![(TokenKind::LineComment, "// comment")]
        );
        assert_eq!(
            tokenize("/// doc"),
            vec![(TokenKind::DocComment, "/// doc")]
        );
    }

    #[test]
    fn test_whitespace() {
        assert_eq!(tokenize("  \t"), vec![(TokenKind::Whitespace, "  \t")]);
        assert_eq!(tokenize("\n"), vec![(TokenKind::Newline, "\n")]);
        assert_eq!(tokenize("\r\n"), vec![(TokenKind::Newline, "\r\n")]);
    }

    #[test]
    fn test_mixed() {
        let tokens = tokenize("{host localhost}");
        assert_eq!(
            tokens,
            vec![
                (TokenKind::LBrace, "{"),
                (TokenKind::BareScalar, "host"),
                (TokenKind::Whitespace, " "),
                (TokenKind::BareScalar, "localhost"),
                (TokenKind::RBrace, "}"),
            ]
        );
    }

    #[test]
    fn test_heredoc() {
        let tokens = tokenize("<<EOF\nhello\nworld\nEOF");
        assert_eq!(
            tokens,
            vec![
                (TokenKind::HeredocStart, "<<EOF\n"),
                (TokenKind::HeredocContent, "hello\nworld\n"),
                (TokenKind::HeredocEnd, "EOF"),
            ]
        );
    }
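
    #[test]
    fn test_heredoc_indented_closing_delimiter() {
        // Illustrative sketch of the dedent bookkeeping described on
        // `tokenize_heredoc_content`: content is returned verbatim, the
        // closing delimiter may be indented, and the tokenizer records that
        // indent for the lexer via `heredoc_closing_indent`.
        let mut tok = Tokenizer::new("<<EOF\n  hello\n  EOF");
        assert_eq!(tok.next_token().kind, TokenKind::HeredocStart);
        let content = tok.next_token();
        assert_eq!(content.kind, TokenKind::HeredocContent);
        assert_eq!(content.text, "  hello\n");
        assert_eq!(tok.heredoc_closing_indent(), Some(2));
        assert_eq!(tok.next_token().kind, TokenKind::HeredocEnd);
    }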

    // parser[verify scalar.heredoc.syntax]
    #[test]
    fn test_heredoc_valid_delimiters() {
        // Single uppercase letter
        assert!(
            tokenize("<<A\nx\nA")
                .iter()
                .all(|t| t.0 != TokenKind::Error)
        );
        // Multiple uppercase letters
        assert!(
            tokenize("<<EOF\nx\nEOF")
                .iter()
                .all(|t| t.0 != TokenKind::Error)
        );
        // With digits after the first char
        assert!(
            tokenize("<<MY123\nx\nMY123")
                .iter()
                .all(|t| t.0 != TokenKind::Error)
        );
        // With underscores
        assert!(
            tokenize("<<MY_DELIM\nx\nMY_DELIM")
                .iter()
                .all(|t| t.0 != TokenKind::Error)
        );
        // 16 chars (max allowed)
        assert!(
            tokenize("<<ABCDEFGHIJKLMNOP\nx\nABCDEFGHIJKLMNOP")
                .iter()
                .all(|t| t.0 != TokenKind::Error)
        );
    }
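
    // parser[verify scalar.heredoc.lang]
    #[test]
    fn test_heredoc_lang_hint() {
        // Illustrative sketch (invented input): a `,lang` hint is consumed as
        // part of the HeredocStart token and does not affect the content.
        assert_eq!(
            tokenize("<<EOF,toml\nkey = 1\nEOF"),
            vec![
                (TokenKind::HeredocStart, "<<EOF,toml\n"),
                (TokenKind::HeredocContent, "key = 1\n"),
                (TokenKind::HeredocEnd, "EOF"),
            ]
        );
    }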

    // parser[verify scalar.heredoc.syntax]
    #[test]
    fn test_heredoc_must_start_uppercase() {
        // Starts with a digit - error
        assert!(tokenize("<<123FOO").iter().any(|t| t.0 == TokenKind::Error));
        // Starts with an underscore - error
        assert!(tokenize("<<_FOO").iter().any(|t| t.0 == TokenKind::Error));
        // Lowercase - not a heredoc: `<<` becomes an Error token,
        // followed by the bare scalar "foo"
        let tokens = tokenize("<<foo");
        assert!(!tokens.iter().any(|t| t.0 == TokenKind::HeredocStart));
    }

    // parser[verify scalar.heredoc.syntax]
    #[test]
    fn test_heredoc_max_16_chars() {
        // 17 chars - error
        assert!(
            tokenize("<<ABCDEFGHIJKLMNOPQ\nx\nABCDEFGHIJKLMNOPQ")
                .iter()
                .any(|t| t.0 == TokenKind::Error)
        );
    }
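
    #[test]
    fn test_empty_heredoc() {
        // Illustrative sketch: when the closing delimiter is on the very next
        // line, no HeredocContent token is produced at all.
        assert_eq!(
            tokenize("<<EOF\nEOF"),
            vec![
                (TokenKind::HeredocStart, "<<EOF\n"),
                (TokenKind::HeredocEnd, "EOF"),
            ]
        );
    }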

    #[test]
    fn test_slash_in_bare_scalar() {
        // A single slash followed by text should be a bare scalar
        let tokens = tokenize("/foo");
        assert_eq!(tokens, vec![(TokenKind::BareScalar, "/foo")]);

        // Path-like value
        let tokens = tokenize("/usr/bin/foo");
        assert_eq!(tokens, vec![(TokenKind::BareScalar, "/usr/bin/foo")]);

        // But // is still a comment
        let tokens = tokenize("// comment");
        assert_eq!(tokens, vec![(TokenKind::LineComment, "// comment")]);
    }

    #[test]
    fn test_attribute_syntax_tokens() {
        // Check how the tokenizer handles attribute syntax
        let tokens = tokenize("server host>localhost");
        // The tokenizer produces separate tokens - attribute syntax is handled by the parser
        assert_eq!(
            tokens,
            vec![
                (TokenKind::BareScalar, "server"),
                (TokenKind::Whitespace, " "),
                (TokenKind::BareScalar, "host"),
                (TokenKind::Gt, ">"),
                (TokenKind::BareScalar, "localhost"),
            ]
        );
    }

    #[test]
    fn test_unterminated_heredoc() {
        // A heredoc without a closing delimiter should be an error
        let tokens = tokenize("<<EOF\nhello world\n");
        eprintln!("tokens = {:?}", tokens);
        assert!(
            tokens.iter().any(|t| t.0 == TokenKind::Error),
            "Expected Error token for unterminated heredoc"
        );
    }

    #[test]
    fn test_unterminated_string() {
        // A string without a closing quote should be an error
        let tokens = tokenize("\"hello");
        eprintln!("tokens = {:?}", tokens);
        assert!(
            tokens.iter().any(|t| t.0 == TokenKind::Error),
            "Expected Error token for unterminated string"
        );
    }
}