parlex_calc/parser.rs

//! # Calculator Parser
//!
//! This module couples the generated SLR(1) parser tables with calculator-
//! specific semantic actions. It exposes:
//!
//! - [`parser_data`]: generated automaton, productions, and IDs,
//! - [`CalcParserDriver`]: semantic hooks for reductions and ambiguities,
//! - [`CalcParser`]: a thin adapter that pulls tokens from the lexer and
//!   yields fully reduced results.
//!
//! The parser consumes tokens produced by [`CalcLexer`] and uses a mutable
//! [`SymTab`] as shared context (for interning identifiers and storing values).
//!
//! ## Behavior highlights
//! - **Operator precedence & associativity** are enforced via the ambiguity
//!   resolver: **shift** on higher-precedence lookahead, otherwise **reduce**.
//!   All binary operators in this grammar are left-associative; the prefix
//!   unary minus is handled separately and does not introduce conflicts.
//! - **Assignments** store the value in the symbol table and evaluate to the
//!   assigned value.
//! - **Empty statements** (a bare `;`) are emitted as a `Stat` token with
//!   [`TokenValue::None`].
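//!
//! ## Example
//!
//! A minimal sketch, mirroring the unit tests at the bottom of this file
//! (and assuming direct access to the token's `value` field):
//!
//! ```rust
//! # use parlex_calc::{CalcParser, SymTab, TokenValue};
//! # use try_next::{IterInput, TryNextWithContext};
//! let mut symtab = SymTab::new();
//! let input = IterInput::from("1 + 2 * 3".bytes());
//! let mut parser = CalcParser::try_new(input).unwrap();
//! // `*` binds tighter than `+`, so the single statement evaluates to 7.
//! let stat = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
//! assert!(matches!(stat.value, TokenValue::Number(7)));
//! ```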

use crate::{CalcLexer, CalcToken, SymTab, TokenID, TokenValue};
use parlex::{
    LexerStats, ParlexError, Parser, ParserAction, ParserData, ParserDriver, ParserStats, Token,
};
use parser_data::{AmbigID, ParData, ProdID, StateID};
use std::marker::PhantomData;
use try_next::TryNextWithContext;

/// Includes the generated SLR parser tables and definitions.
///
/// This file (`parser_data.rs`) is produced by the **parlex-gen** [`aslr`] tool
/// during the build process. It defines the parsing automaton, rule metadata,
/// and associated enum types used by the [`CalcParser`].
pub mod parser_data {
    include!(concat!(env!("OUT_DIR"), "/parser_data.rs"));
}

/// A driver that defines semantic actions for the calculator parser.
///
/// The [`CalcParserDriver`] type implements [`ParserDriver`] and acts as the
/// bridge between the parser engine ([`Parser`]) and calculator-specific
/// semantic logic.
///
/// It provides the behavior for grammar reductions and ambiguity resolution
/// during parsing. Each reduction corresponds to a grammar production rule
/// in [`ParData`] and is responsible for building or evaluating partial
/// results (e.g., evaluating arithmetic expressions, populating the symbol
/// table, or constructing AST nodes).
///
/// # Type Parameters
///
/// - `I`: The input source (the lexer) that yields [`CalcToken`]s and maintains a
///   contextual [`SymTab`]. Must implement
///   [`TryNextWithContext<SymTab, LexerStats, Item = CalcToken>`].
///
/// # Associated Types
///
/// - `ParserData = ParData`:
///   Generated parser metadata containing grammar rules, production IDs,
///   and ambiguity identifiers.
/// - `Token = CalcToken`:
///   The token type produced by the lexer and consumed by this parser.
/// - `Parser = Parser<I, Self, SymTab>`:
///   The parser engine parameterized by this driver and context.
/// - `Context = SymTab`:
///   Externally supplied context.
///
/// Driver methods report failures as [`ParlexError`].
///
/// # Responsibilities
///
/// The parser driver performs calculator-specific actions:
///
/// - **`resolve_ambiguity`** — invoked when the grammar allows multiple valid
///   interpretations of a token sequence. The driver chooses which parse path
///   to follow by returning an appropriate [`ParserAction`].
/// - **`reduce`** — executed when a grammar production completes. The driver
///   can perform semantic actions such as arithmetic evaluation, updating the
///   symbol table, or producing intermediate values.
///
/// # Notes
///
/// - The driver may be stateless (`_marker` only), or store intermediate
///   evaluation state if needed.
/// - Ambiguities can be resolved dynamically based on the current parse state
///   or the next lookahead token.
/// - The `reduce` method corresponds to grammar rules such as:
///   ```text
///   Expr → Expr '+' Expr
///   Expr → NUMBER
///   ```
///   allowing the driver to fold numeric operations or emit result nodes.
pub struct CalcParserDriver<I> {
    /// Marker to associate the driver with its input type `I`.
    _marker: PhantomData<I>,
}

impl<I> ParserDriver for CalcParserDriver<I>
where
    I: TryNextWithContext<SymTab, LexerStats, Item = CalcToken, Error: std::fmt::Display + 'static>,
{
    /// Parser metadata generated from the calculator grammar.
    type ParserData = ParData;

    /// Token type consumed by the parser.
    type Token = CalcToken;

    /// Concrete parser engine type.
    type Parser = Parser<I, Self, Self::Context>;

    /// Context (symbol table or shared state).
    type Context = SymTab;

    /// Resolves grammar ambiguities when multiple parse actions are valid.
    ///
    /// The driver can inspect the parser conflict (`ambig`) and the upcoming
    /// token (`token`) to decide which parse branch to follow. This method
    /// returns the selected [`ParserAction`].
    ///
    /// For an unambiguous grammar this hook is never invoked. This calculator
    /// grammar, however, deliberately leaves its binary-operator conflicts in
    /// the parse tables and resolves them here using precedence and
    /// associativity.
    ///
    /// # Shift/Reduce Conflicts
    ///
    /// In practice, this hook is primarily used to resolve **Shift/Reduce**
    /// conflicts — cases where the parser can either:
    /// - **Reduce** using a completed production rule, or
    /// - **Shift** the next incoming token (`token`).
    ///
    /// Other types of conflicts (such as **Reduce/Reduce**) are much more
    /// difficult to handle programmatically and usually require modifying
    /// the grammar itself to eliminate ambiguity.
    ///
    /// In a typical arithmetic grammar, you can use operator precedence and
    /// associativity to decide whether to shift or reduce. For example:
    ///
    /// ```text
    /// Expr -> Expr '+' Expr
    /// ```
    ///
    /// When the incoming token is `*`, the driver can compare the precedence
    /// of `'+'` (lower) vs. `'*'` (higher) and decide to **Shift**, allowing
    /// the parser to defer reduction until the higher-precedence operation
    /// (`*`) is parsed first.
    ///
    /// This strategy ensures that the resulting parse tree respects the
    /// intended operator precedence and associativity rules.
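    ///
    /// The implementation below applies exactly this strategy to the
    /// conflicts this grammar produces; roughly:
    ///
    /// ```text
    /// completed production        lookahead     decision
    /// Expr -> Expr '+'|'-' Expr   '*' or '/'    Shift  (multiplication binds tighter)
    /// Expr -> Expr '+'|'-' Expr   '+' or '-'    Reduce (left associativity)
    /// Expr -> Expr '*'|'/' Expr   any           Reduce (precedence / left associativity)
    /// Expr -> '-' Expr            any           Reduce (unary minus binds tightest)
    /// ```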
    fn resolve_ambiguity(
        &mut self,
        _parser: &mut Self::Parser,
        _context: &mut Self::Context,
        ambig: <Self::ParserData as ParserData>::AmbigID,
        token: &Self::Token,
    ) -> Result<ParserAction<StateID, ProdID, AmbigID>, ParlexError> {
        let ambig_tab = ParData::lookup_ambig(ambig);
        let shift = ambig_tab[0];
        let reduce = ambig_tab[1];
        let ParserAction::Shift(_) = shift else {
            panic!("expected shift");
        };
        let ParserAction::Reduce(prod_id) = reduce else {
            panic!("expected reduce");
        };
        let CalcToken { token_id, .. } = token;

        match prod_id {
            ProdID::Expr3 | ProdID::Expr4 => {
                // Expr -> Expr + Expr | Expr - Expr
                match token_id {
                    TokenID::Plus | TokenID::Minus => Ok(reduce), // `+` and `-` are left-associative; `-` can't be unary in this context
                    TokenID::Asterisk | TokenID::Slash => Ok(shift), // `*` and `/` have higher precedence than `+` and `-`
                    _ => unreachable!(),
                }
            }
            ProdID::Expr5 | ProdID::Expr6 => {
                // Expr -> Expr * Expr | Expr / Expr
                Ok(reduce) // the lookahead `-` can't be unary; therefore, we reduce either by higher precedence or left associativity
            }
            ProdID::Expr7 => {
                // Expr -> - Expr
                Ok(reduce) // unary `-` has higher precedence than anything else
            }
            _ => panic!("unexpected prod in ambiguity"),
        }
    }

    /// Performs semantic reduction for a completed grammar production.
    ///
    /// This is the main hook for calculator logic: each time the parser
    /// recognizes a rule (identified by `prod_id`), the driver can evaluate
    /// or construct the corresponding result, possibly updating the context.
    ///
    /// For example, when reducing:
    /// ```text
    /// Expr -> Expr '+' Expr
    /// ```
    /// the driver may pop the right-hand values from the parser stack, perform
    /// addition, and push the result back.
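    ///
    /// Schematically, the effect of that reduction on the token stack is
    /// roughly:
    ///
    /// ```text
    /// before: ... Expr(lhs) '+' Expr(rhs)
    /// after:  ... Expr(lhs + rhs)           <- span widened to cover both operands
    /// ```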
    fn reduce(
        &mut self,
        parser: &mut Self::Parser,
        context: &mut Self::Context,
        prod_id: <Self::ParserData as ParserData>::ProdID,
        token: &Self::Token,
    ) -> Result<(), ParlexError> {
        match prod_id {
            ProdID::Start => {
                // Start -> Stat
                // Accept - does not get reduced
                unreachable!()
            }
            ProdID::Stat1 => {
                // Stat ->
                parser.tokens_push(CalcToken {
                    token_id: TokenID::Stat,
                    span: token.span(),
                    value: TokenValue::None,
                });
            }
            ProdID::Stat2 => {
                // Stat -> comment Stat
                let mut stat = parser.tokens_pop();
                let comment_tok = parser.tokens_pop();
                let TokenValue::Comment(comment) = comment_tok.value else {
                    unreachable!()
                };
                stat.to_statement(Some(comment));
                stat.merge_span(&comment_tok.span);
                parser.tokens_push(stat);
            }
            ProdID::Stat3 => {
                // Stat -> Expr
                let mut expr = parser.tokens_pop();
                expr.token_id = TokenID::Stat;
                parser.tokens_push(expr);
            }
            ProdID::Stat4 => {
                // Stat -> ident = Expr
                let mut expr = parser.tokens_pop();
                let TokenValue::Number(value) = expr.value else {
                    unreachable!()
                };
                parser.tokens_pop();
                let ident = parser.tokens_pop();
                let TokenValue::Ident(index) = ident.value else {
                    unreachable!()
                };
                context
                    .set(index, value)
                    .map_err(|e| ParlexError::from_err(e, ident.span()))?; //TODO: fix span
                expr.token_id = TokenID::Stat;
                expr.merge_span(&ident.span);
                parser.tokens_push(expr);
            }
            ProdID::Expr1 => {
                // Expr -> number
                let mut number = parser.tokens_pop();
                number.token_id = TokenID::Expr;
                parser.tokens_push(number);
            }
            ProdID::Expr2 => {
                // Expr -> ident
                let mut tok = parser.tokens_pop();
                tok.token_id = TokenID::Expr;
                let TokenValue::Ident(index) = tok.value else {
                    unreachable!()
                };
                tok.value = TokenValue::Number(
                    context
                        .get(index)
                        .map_err(|e| ParlexError::from_err(e, tok.span()))?,
                );
                parser.tokens_push(tok);
            }
            ProdID::Expr3 => {
                // Expr -> Expr + Expr
                let expr2 = parser.tokens_pop();
                parser.tokens_pop();
                let mut expr1 = parser.tokens_pop();
                let TokenValue::Number(value1) = expr1.value else {
                    unreachable!()
                };
                let TokenValue::Number(value2) = expr2.value else {
                    unreachable!()
                };
                expr1.value = TokenValue::Number(value1 + value2);
                expr1.merge_span(&expr2.span);
                parser.tokens_push(expr1);
            }
            ProdID::Expr4 => {
                // Expr -> Expr - Expr
                let expr2 = parser.tokens_pop();
                parser.tokens_pop();
                let mut expr1 = parser.tokens_pop();
                let TokenValue::Number(value1) = expr1.value else {
                    unreachable!()
                };
                let TokenValue::Number(value2) = expr2.value else {
                    unreachable!()
                };
                expr1.value = TokenValue::Number(value1 - value2);
                expr1.merge_span(&expr2.span);
                parser.tokens_push(expr1);
            }
            ProdID::Expr5 => {
                // Expr -> Expr * Expr
                let expr2 = parser.tokens_pop();
                parser.tokens_pop();
                let mut expr1 = parser.tokens_pop();
                let TokenValue::Number(value1) = expr1.value else {
                    unreachable!()
                };
                let TokenValue::Number(value2) = expr2.value else {
                    unreachable!()
                };
                expr1.value = TokenValue::Number(value1 * value2);
                expr1.merge_span(&expr2.span);
                parser.tokens_push(expr1);
            }
            ProdID::Expr6 => {
                // Expr -> Expr / Expr
                let expr2 = parser.tokens_pop();
                parser.tokens_pop();
                let mut expr1 = parser.tokens_pop();
                let TokenValue::Number(value1) = expr1.value else {
                    unreachable!()
                };
                let TokenValue::Number(value2) = expr2.value else {
                    unreachable!()
                };
                expr1.value = TokenValue::Number(value1 / value2);
                expr1.merge_span(&expr2.span);
                parser.tokens_push(expr1);
            }
            ProdID::Expr7 => {
                // Expr -> - Expr
                let mut expr = parser.tokens_pop();
                let minus = parser.tokens_pop();
                let TokenValue::Number(value) = expr.value else {
                    unreachable!()
                };
                expr.value = TokenValue::Number(-value);
                expr.merge_span(&minus.span);
                parser.tokens_push(expr);
            }
            ProdID::Expr8 => {
                // Expr -> ( Expr )
                // The `)` is on top of the token stack, so it is popped first.
                let right_paren = parser.tokens_pop();
                let mut expr = parser.tokens_pop();
                let left_paren = parser.tokens_pop();
                expr.merge_span(&left_paren.span);
                expr.merge_span(&right_paren.span);
                parser.tokens_push(expr);
            }
        }
        Ok(())
    }
}

/// The calculator parser, a wrapper that couples:
/// - the calculator lexer ([`CalcLexer`]) producing [`CalcToken`]s, and
/// - the calculator parser driver ([`CalcParserDriver`]) implementing reductions
///   and ambiguity resolution for the calculator grammar.
///
/// `CalcParser<I>` exposes an iterator-like interface via
/// [`TryNextWithContext`], yielding completed parse results (e.g., one per
/// “sentence” or top-level expression) while using a shared [`SymTab`] as
/// context. Internally it owns a generic [`Parser`] that pulls tokens
/// from `CalcLexer` and executes semantic actions in `CalcParserDriver`.
///
/// # Input / Output
///
/// - **Input**: any byte stream `I` implementing
///   [`TryNextWithContext<SymTab, Item = u8>`].
/// - **Output**: completed parsing units as [`CalcToken`] values (typically
///   grammar-level results like expressions/statements).
///
/// # End Tokens and Multiple Sentences
///
/// The underlying lexer typically emits an explicit [`TokenID::End`] token at
/// the end of a *parsing unit* (end of “sentence” or expression). The parser
/// uses this to finalize and emit one result. If the input contains multiple
/// independent sentences, you will receive multiple results — one per `End` —
/// and `None` only after all input is consumed.
///
/// # Empty Statements
///
/// The calculator grammar also accepts an *empty* statement, which is returned
/// as a token with [`TokenValue::None`].
/// This occurs, for example, when the last statement in the input is terminated
/// by a semicolon (`;`) but followed by no further expression. In that case:
///
/// 1. The parser first emits the token for the preceding completed statement.
/// 2. It then emits an additional token representing the empty statement
///    (`TokenValue::None`).
/// 3. Finally, it returns `None`, indicating the end of the input stream.
///
/// This design allows the parser to fully reflect the structure of the input,
/// including empty or separator-only statements (see the second example below).
///
/// # Errors
///
/// All failures are surfaced as a [`ParlexError`], which covers both:
/// - `I::Error` — errors from the input source, and
/// - calculator-specific lexical/semantic errors (e.g., UTF-8 decoding,
///   integer parsing, symbol-table issues).
///
/// # Example
///
/// ```rust
/// # use parlex_calc::{CalcToken, CalcParser, SymTab, TokenID, TokenValue};
/// # use try_next::{IterInput, TryNextWithContext};
/// let mut symtab = SymTab::new();
/// let input = IterInput::from("hello = 1;\n foo =\n 5 + 3 * 2;\n (world + hello + 10) * -2;\n\n1000 - - -123".bytes());
/// let mut parser = CalcParser::try_new(input).unwrap();
/// let vs = parser.try_collect_with_context(&mut symtab).unwrap();
/// assert_eq!(vs.len(), 4);
/// assert_eq!(symtab.len(), 3);
/// ```
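///
/// A trailing semicolon yields one extra empty statement before the stream
/// ends. A second sketch, mirroring the `parses_assignment_and_reference`
/// unit test below (again assuming direct access to the token's `value`
/// field):
///
/// ```rust
/// # use parlex_calc::{CalcParser, SymTab, TokenValue};
/// # use try_next::{IterInput, TryNextWithContext};
/// let mut symtab = SymTab::new();
/// let input = IterInput::from("x = 2;\n x + 3;".bytes());
/// let mut parser = CalcParser::try_new(input).unwrap();
/// // `x = 2` evaluates to the assigned value.
/// assert!(matches!(
///     parser.try_next_with_context(&mut symtab).unwrap().unwrap().value,
///     TokenValue::Number(2)
/// ));
/// // `x + 3` reads `x` back from the symbol table.
/// assert!(matches!(
///     parser.try_next_with_context(&mut symtab).unwrap().unwrap().value,
///     TokenValue::Number(5)
/// ));
/// // The trailing `;` yields an empty statement...
/// assert!(matches!(
///     parser.try_next_with_context(&mut symtab).unwrap().unwrap().value,
///     TokenValue::None
/// ));
/// // ...and only then does the parser report end of input.
/// assert!(parser.try_next_with_context(&mut symtab).unwrap().is_none());
/// ```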
pub struct CalcParser<I>
where
    I: TryNextWithContext<SymTab, Item = u8, Error: std::fmt::Display + 'static>,
{
    parser: Parser<CalcLexer<I>, CalcParserDriver<CalcLexer<I>>, SymTab>,
}

impl<I> CalcParser<I>
where
    I: TryNextWithContext<SymTab, Item = u8, Error: std::fmt::Display + 'static>,
{
    /// Creates a new [`CalcParser`] over the given byte input.
    ///
    /// Builds the [`CalcLexer`] for `input` and couples it with a
    /// [`CalcParserDriver`].
    pub fn try_new(input: I) -> Result<Self, ParlexError> {
        let lexer = CalcLexer::try_new(input)?;
        let driver = CalcParserDriver {
            _marker: PhantomData,
        };
        let parser = Parser::new(lexer, driver);
        Ok(Self { parser })
    }
}

impl<I> TryNextWithContext<SymTab, (LexerStats, ParserStats)> for CalcParser<I>
where
    I: TryNextWithContext<SymTab, Item = u8, Error: std::fmt::Display + 'static>,
{
    type Item = CalcToken;
    type Error = ParlexError;

    /// Returns the next fully reduced unit (`Stat`), or `None` at end of input.
    ///
    /// The underlying lexer typically emits an explicit [`TokenID::End`] at
    /// unit boundaries (e.g., semicolon-terminated statements). The parser
    /// finalizes and yields one `Stat` per such boundary.
    fn try_next_with_context(
        &mut self,
        context: &mut SymTab,
    ) -> Result<Option<CalcToken>, ParlexError> {
        self.parser.try_next_with_context(context)
    }

    fn stats(&self) -> (LexerStats, ParserStats) {
        self.parser.stats()
    }
}

#[cfg(test)]
mod tests {
    use crate::{CalcParser, CalcToken, SymTab, TokenID, TokenValue};
    use parlex::span;
    use try_next::{IterInput, TryNextWithContext};

    #[test]
    fn parses_four_stats() {
        let _ = env_logger::builder().is_test(true).try_init();
        let mut symtab = SymTab::new();
        let input = IterInput::from(
            "hello = 1;\n 1 + 2;\n (world + hello + 10) * -2;\n\n1000 - - -123;".bytes(),
        );
        let mut parser = CalcParser::try_new(input).unwrap();
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            Some(CalcToken {
                token_id: TokenID::Stat,
                span: span!(0, 0, 0, 9),
                value: TokenValue::Number(1)
            }),
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            Some(CalcToken {
                token_id: TokenID::Stat,
                span: span!(1, 1, 1, 6),
                value: TokenValue::Number(3)
            }),
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            Some(CalcToken {
                token_id: TokenID::Stat,
                span: span!(2, 1, 2, 26),
                value: TokenValue::Number(-22)
            }),
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            Some(CalcToken {
                token_id: TokenID::Stat,
                span: span!(4, 0, 4, 13),
                value: TokenValue::Number(877)
            }),
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            Some(CalcToken {
                token_id: TokenID::Stat,
                span: span!(4, 14, 4, 14),
                value: TokenValue::None
            }),
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            None,
        ));
        assert!(matches!(
            parser.try_next_with_context(&mut symtab).unwrap(),
            None,
        ));
    }

    #[test]
    fn parses_assignment_and_reference() {
        // x = 2; x + 3;
        let _ = env_logger::builder().is_test(true).try_init();
        let mut symtab = SymTab::new();
        let input = IterInput::from("x = 2;\n x + 3;".bytes());
        let mut parser = CalcParser::try_new(input).unwrap();

        // x = 2;
        let t1 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t1,
            CalcToken {
                token_id: TokenID::Stat,
                span: span!(0, 0, 0, 5),
                value: TokenValue::Number(2)
            }
        ));

        // x + 3;
        let t2 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t2,
            CalcToken {
                token_id: TokenID::Stat,
                span: span!(1, 1, 1, 6),
                value: TokenValue::Number(5)
            }
        ));

        // empty
        let t3 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t3,
            CalcToken {
                token_id: TokenID::Stat,
                span: span!(1, 7, 1, 7),
                value: TokenValue::None
            }
        ));

        // EOF
        assert!(parser.try_next_with_context(&mut symtab).unwrap().is_none());
        // symbol table contains one identifier
        assert_eq!(symtab.len(), 1);
    }

    #[test]
    fn respects_operator_precedence_and_unary_minus() {
        // 1 + 2 * 3;  -(1 + 2) * 3;
        let _ = env_logger::builder().is_test(true).try_init();
        let mut symtab = SymTab::new();
        let input = IterInput::from("1 + 2 * 3;\n-(1 + 2) * 3".bytes());
        let mut parser = CalcParser::try_new(input).unwrap();

        // 1 + 2 * 3 => 7
        let t1 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        dbg!(parser.stats());
        assert!(matches!(
            t1,
            CalcToken {
                token_id: TokenID::Stat,
                span: span!(0, 0, 0, 9),
                value: TokenValue::Number(7)
            }
        ));

        // -(1 + 2) * 3 => -9
        let t2 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t2,
            CalcToken {
                token_id: TokenID::Stat,
                span: span!(1, 0, 1, 12),
                value: TokenValue::Number(-9)
            }
        ));

        assert!(parser.try_next_with_context(&mut symtab).unwrap().is_none());
    }

    #[test]
    fn emits_empty_statement_as_none() {
        // 1; ;
        let _ = env_logger::builder().is_test(true).try_init();
        let mut symtab = SymTab::new();
        let input = IterInput::from("1; ;".bytes());
        let mut parser = CalcParser::try_new(input).unwrap();

        // 1;
        let t1 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t1,
            CalcToken {
                token_id: TokenID::Stat,
                value: TokenValue::Number(1),
                ..
            }
        ));

        // empty ;
        let t2 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t2,
            CalcToken {
                token_id: TokenID::Stat,
                value: TokenValue::None,
                ..
            }
        ));

        // empty EOF
        let t3 = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t3,
            CalcToken {
                token_id: TokenID::Stat,
                value: TokenValue::None,
                ..
            }
        ));

        // EOF
        assert!(parser.try_next_with_context(&mut symtab).unwrap().is_none());
    }

    #[test]
    fn parentheses_override_precedence() {
        // (1 + 2) * 3 => 9
        let _ = env_logger::builder().is_test(true).try_init();
        let mut symtab = SymTab::new();
        let input = IterInput::from("(1 + 2) * 3".bytes());
        let mut parser = CalcParser::try_new(input).unwrap();

        let t = parser.try_next_with_context(&mut symtab).unwrap().unwrap();
        assert!(matches!(
            t,
            CalcToken {
                token_id: TokenID::Stat,
                value: TokenValue::Number(9),
                ..
            }
        ));

        assert!(parser.try_next_with_context(&mut symtab).unwrap().is_none());
    }
}