# trivet 3.1.0
#
# The trivet Parser Library
# Documentation
#!/usr/bin/env python3

"""Check the JSON validator against many known samples."""

import glob
import os
import subprocess  # nosec B404
import sys
from typing import Optional

from debug import debug, error

# Locate the folder containing this script; everything else is found
# relative to it.
script_folder = os.path.dirname(os.path.abspath(sys.argv[0]))
if not script_folder:
    # abspath/dirname should always yield a non-empty path, but fail loudly
    # rather than proceed with relative lookups if they somehow do not.
    error("Unable to get script folder; cannot proceed")
    sys.exit(1)
debug(f"script_folder = {script_folder}")

# Locate the folder holding the JSON test samples.
test_folder = os.path.join(script_folder, "json_test_cases")
if not os.path.isdir(test_folder):
    error(f"Cannot locate test folder.  Looking here: {test_folder}")
    sys.exit(1)
debug(f"test_folder = {test_folder}")

# Find the json_validator binary, preferring a release build over a debug
# build, and checking for a Windows ".exe" suffix in each case.
validator_paths = [
    os.path.join(script_folder, "..", "target", "release", "examples", "json_validator"),
    os.path.join(script_folder, "..", "target", "release", "examples", "json_validator.exe"),
    os.path.join(script_folder, "..", "target", "debug", "examples", "json_validator"),
    os.path.join(script_folder, "..", "target", "debug", "examples", "json_validator.exe"),
]
validator: Optional[str] = None
for candidate in validator_paths:
    if os.path.isfile(candidate):
        validator = candidate
        debug(f"validator = {validator}")
        break
if validator is None:
    # NOTE: the original loop reused its iteration variable here, so this
    # branch could never trigger; a separate sentinel fixes that.
    error(f"Cannot find json_validator.  Looked in: {validator_paths}")
    sys.exit(1)


def _report_sample(report, sample: str, expect_success: bool) -> bool:
    """Run the validator on one sample and append the outcome to *report*.

    Args:
        report: open text file receiving the markdown report.
        sample: file name of the sample, relative to ``test_folder``.
        expect_success: True when the validator should accept the sample
            (exit status 0), False when it should reject it.

    Returns:
        True when the validator's exit status matched *expect_success*;
        False otherwise (the mismatch is logged and noted in the report).
    """
    debug(f"Testing sample {sample}\n")
    print(f"## `{sample}`\n", flush=True, file=report)
    print("Output:\n```text", flush=True, file=report)
    # We are explicitly running the validator we built, with its output
    # captured directly into the report file.
    retval = subprocess.call(
        [validator, os.path.join(test_folder, sample)],  # nosec B603
        stderr=report,
        stdout=report,
    )
    # Flush before writing the closing fence so the subprocess output lands
    # inside the code block.
    report.flush()
    print("```\n", flush=True, file=report)
    succeeded = retval == 0
    if succeeded == expect_success:
        return True
    verdict = "passed but should have failed" if succeeded else "failed but should have passed"
    error(f"Sample {sample} {verdict}")
    print("**FAILED**\n", flush=True, file=report)
    return False


def main() -> int:
    """Check the JSON validator against samples in the test folder.

    Writes a markdown report to ``json-report.md`` in the current directory.

    Returns:
        0 when every sample behaved as expected, 1 otherwise.
    """
    errors = 0
    count = 0
    with open("json-report.md", "wt", encoding="utf8") as report:
        print("# JSON Validation Report", flush=True, file=report)
        # "n_*" samples are invalid JSON and must be rejected;
        # "y_*" samples are valid JSON and must be accepted.
        for pattern, expect_success in (("n_*", False), ("y_*", True)):
            for sample in glob.glob(pattern, root_dir=test_folder):
                count += 1
                if not _report_sample(report, sample, expect_success):
                    errors += 1
    if errors > 0:
        error(f"Encountered {errors} error(s) in {count} case(s)")
        return 1
    print(f"Ran {count} case(s); no errors detected")
    print(
        "Be sure to check json-report.md to make sure the error messages are accurate."
    )
    return 0


if __name__ == "__main__":
    print("Testing the JSON parser")
    main()