//! Syntax-analysis regression tests for `df_ls_syntax_analysis` (0.3.0-rc.1),
//! part of a language server for Dwarf Fortress RAW files.

#![allow(clippy::needless_update)]

use df_ls_debug_structure::*;
use df_ls_diagnostics::lsp_types::*;
use df_ls_lexical_analysis::test_utils::LexerTestBuilder;
use df_ls_syntax_analysis::test_utils::SyntaxTestBuilder;

/// Tests how subsequent tokens are handled when the `primary_token` has an error.
/// (Regression test for issue #36)
/// (Slightly updated because of #32)
#[test]
fn primary_token_error_1() {
    SyntaxTestBuilder::from_lexer_test_builder(
        LexerTestBuilder::test_source(
            "h
            [MAIN:TYPE1]

            [TYPE1:9996]
                [ITEM:T1]
                [ITEM:T2]

            [TYPE1:REF2]
                [ITEM:T2]
            ",
        )
        .add_test_lexer_diagnostics_codes(vec![])
        .add_test_lexer_diagnostics_ranges(vec![]),
    )
    .add_test_structure(DebugRaw {
        header: "".to_owned(), // TODO this should still be correctly deserialized. See #20
        token_structure: vec![],
    })
    .add_test_syntax_diagnostics_codes(vec![
        "wrong_arg_type",
        "token_is_missing",
        "token_not_expected",
        "unchecked_code",
    ])
    .add_test_syntax_diagnostics_ranges(vec![
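        // Covers `9996` in `[TYPE1:9996]` (presumably the `wrong_arg_type` diagnostic).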
        Range {
            start: Position {
                line: 3,
                character: 19,
            },
            end: Position {
                line: 3,
                character: 23,
            },
        },
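        // Covers `[ITEM:T1]` (presumably `token_is_missing`).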
        Range {
            start: Position {
                line: 4,
                character: 16,
            },
            end: Position {
                line: 4,
                character: 25,
            },
        },
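        // The same `[ITEM:T1]` span again (presumably `token_not_expected`).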
        Range {
            start: Position {
                line: 4,
                character: 16,
            },
            end: Position {
                line: 4,
                character: 25,
            },
        },
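        // Covers everything after `[ITEM:T1]` to the end of the source
        // (presumably the `unchecked_code` span).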
        Range {
            start: Position {
                line: 4,
                character: 25,
            },
            end: Position {
                line: 9,
                character: 12,
            },
        },
    ])
    .run_test();
}

/// Tests how subsequent tokens are handled when the `primary_token` has an error.
/// (Regression test for issue #36)
#[test]
fn primary_token_error_2() {
    SyntaxTestBuilder::from_lexer_test_builder(
        LexerTestBuilder::test_source(
            "h
            [MAIN:TYPE1]

            TYPE1:REF]
                [ITEM:T1]

            [MAIN:TYPE2]

            [BIPEDAL:MONSTER]
                [NAME:test]
                [EDUCATION:inn:beer:5]
            ",
        )
        .add_test_lexer_diagnostics_codes(vec!["unexpected_end_bracket"])
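        // Covers the stray `]` after `TYPE1:REF`, which has no opening bracket.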
        .add_test_lexer_diagnostics_ranges(vec![Range {
            start: Position {
                line: 3,
                character: 21,
            },
            end: Position {
                line: 3,
                character: 22,
            },
        }]),
    )
    .add_test_structure(DebugRaw {
        header: "".to_owned(), // TODO this should still be correctly deserialized. See #20
        token_structure: vec![],
    })
    .add_test_syntax_diagnostics_codes(vec![
        "token_is_missing",
        "token_not_expected",
        "unchecked_code",
    ])
    .add_test_syntax_diagnostics_ranges(vec![
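        // Covers `[ITEM:T1]` (presumably `token_is_missing`).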
        Range {
            start: Position {
                line: 4,
                character: 16,
            },
            end: Position {
                line: 4,
                character: 25,
            },
        },
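        // The same `[ITEM:T1]` span again (presumably `token_not_expected`).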
        Range {
            start: Position {
                line: 4,
                character: 16,
            },
            end: Position {
                line: 4,
                character: 25,
            },
        },
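        // Covers everything after `[ITEM:T1]` to the end of the source
        // (presumably the `unchecked_code` span).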
        Range {
            start: Position {
                line: 4,
                character: 25,
            },
            end: Position {
                line: 11,
                character: 12,
            },
        },
    ])
    .run_test();
}

/// Tests how subsequent tokens are handled when the `primary_token` has an error.
/// (Regression test for issue #36)
#[test]
fn primary_token_error_3() {
    SyntaxTestBuilder::from_lexer_test_builder(
        LexerTestBuilder::test_source(
            "h
            [MAIN:TYPE1]

                [ITEM:T1]

            [MAIN:TYPE2]

            [BIPEDAL:MONSTER]
                [NAME:test]
                [EDUCATION:inn:beer:5]
            ",
        )
        .add_test_lexer_diagnostics_codes(vec![])
        .add_test_lexer_diagnostics_ranges(vec![]),
    )
    .add_test_structure(DebugRaw {
        header: "".to_owned(), // TODO this should still be correctly deserialized. See #20
        token_structure: vec![],
    })
    .add_test_syntax_diagnostics_codes(vec![
        "token_is_missing",
        "token_not_expected",
        "unchecked_code",
    ])
    .add_test_syntax_diagnostics_ranges(vec![
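        // Covers `[ITEM:T1]` (presumably `token_is_missing`).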
        Range {
            start: Position {
                line: 3,
                character: 16,
            },
            end: Position {
                line: 3,
                character: 25,
            },
        },
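        // The same `[ITEM:T1]` span again (presumably `token_not_expected`).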
        Range {
            start: Position {
                line: 3,
                character: 16,
            },
            end: Position {
                line: 3,
                character: 25,
            },
        },
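        // Covers everything after `[ITEM:T1]` to the end of the source
        // (presumably the `unchecked_code` span).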
        Range {
            start: Position {
                line: 3,
                character: 25,
            },
            end: Position {
                line: 10,
                character: 12,
            },
        },
    ])
    .run_test();
}