# padz 0.8.1
#
# A fast, project-aware scratch pad for the command line.
# Documentation
#!/usr/bin/env python3
import argparse
import json
import re
from collections import defaultdict
from typing import Dict, List


def parse_loc_report(
    tmpfile_path: str, workspaces: List[str] | None = None
) -> Dict[str, object]:
    """Parse the LOC report generated by rust-code-analysis.

    Args:
        tmpfile_path: Path to the plain-text report file.
        workspaces: Optional workspace paths used to filter the result;
            "." (or "", "./") keeps every file.

    Returns:
        Dict with "files" (per-file rows), "directories" (aggregated
        totals), and "summary" (overall counts).
    """
    with open(tmpfile_path, "r", encoding="utf-8") as f:
        content = f.read()

    files: List[Dict[str, int | str]] = []
    current_file: str | None = None
    current_main_code = 0
    current_test_code = 0

    def _flush() -> None:
        # Append the row for the file currently being accumulated (if any).
        if current_file is not None:
            files.append(
                {
                    "name": current_file,
                    "code": current_main_code,
                    "tests": current_test_code,
                    "total": current_main_code + current_test_code,
                }
            )

    for line in content.split("\n"):
        # Skip summary sections at the end of the report.
        if "File count:" in line:
            break

        if line.startswith("File name:"):
            _flush()
            match = re.match(r"File name: \.(/.*)", line)
            # BUG FIX: previously, a header that did not match the regex left
            # the *previous* file "current" with stale counters, so later
            # Main/Tests rows were misattributed and the previous file was
            # appended twice. Now an unmatched header clears the state.
            current_file = match.group(1) if match else None
            current_main_code = 0
            current_test_code = 0
        elif current_file and line.startswith("Main") and "|" in line:
            parts = [p.strip() for p in line.split("|")]
            if len(parts) >= 2:
                try:
                    current_main_code = int(parts[1])
                except ValueError:
                    current_main_code = 0
        elif current_file and line.startswith("Tests") and "|" in line:
            parts = [p.strip() for p in line.split("|")]
            if len(parts) >= 2:
                try:
                    current_test_code = int(parts[1])
                except ValueError:
                    current_test_code = 0

    # Flush the last file seen before EOF / the summary section.
    _flush()

    if workspaces:
        targets = [_normalize_workspace_path(ws) for ws in workspaces]

        def _matches(path: str) -> bool:
            # "." matches everything; otherwise the file must be the
            # workspace itself or live underneath it.
            return any(
                ws == "." or path == ws or path.startswith(f"{ws}/")
                for ws in targets
            )

        files = [
            f for f in files if _matches(_normalize_file_path(str(f["name"])))
        ]

    files.sort(key=lambda f: f["name"])

    total_code = sum(f["code"] for f in files)
    total_tests = sum(f["tests"] for f in files)
    total_all = total_code + total_tests

    directories = _aggregate_directories(files)

    return {
        "files": files,
        "directories": directories,
        "summary": {
            "file_count": len(files),
            "total_code": total_code,
            "total_tests": total_tests,
            "total_all": total_all,
        },
    }


def _aggregate_directories(
    files: List[Dict[str, int | str]],
) -> List[Dict[str, int | str]]:
    dir_totals: Dict[str, Dict[str, int]] = defaultdict(lambda: {"code": 0, "tests": 0})
    for f in files:
        path_parts = str(f["name"]).replace("./", "").split("/")
        if path_parts and not path_parts[0]:
            path_parts = path_parts[1:]
        for i in range(1, len(path_parts)):
            dir_path = "/".join(path_parts[:i])
            dir_totals[dir_path]["code"] += int(f["code"])
            dir_totals[dir_path]["tests"] += int(f["tests"])

    sorted_dirs = sorted(dir_totals.items(), key=lambda item: item[0])

    return [
        {
            "directory": directory,
            "display_name": directory.split("/")[-1] + "/",
            "depth": max(len(directory.split("/")) - 1, 0),
            "code": totals["code"],
            "tests": totals["tests"],
            "total": totals["code"] + totals["tests"],
        }
        for directory, totals in sorted_dirs
    ]


def render_terminal(data: Dict[str, object]) -> None:
    files = data["files"]
    summary = data["summary"]
    directories = data["directories"]

    print("# Rust LOC Report")
    print()
    print("Here's the table showing LOC counts for all files:")
    print()
    print("| File name | Code | Tests | Total |")
    print("| --- | ---: | ---: | ---: |")
    for f in files:
        name = str(f["name"]).replace("./", "")
        if name.startswith("/"):
            name = name[1:]
        if len(name) > 60:
            name = name[:57] + "..."
        print(f"| {name} | {int(f['code']):>4} | {int(f['tests']):>4} | {int(f['total']):>4} |")

    print(
        f"| TOTAL ({summary['file_count']} files) | "
        f"{summary['total_code']:>4} | {summary['total_tests']:>4} | {summary['total_all']:>4} |"
    )
    print()
    print()
    print("## Directory Aggregation")
    print()
    print("| Directory | Code | Tests | Total |")
    print("| --- | ---: | ---: | ---: |")
    for d in directories:
        depth = int(d.get("depth", 0))
        base_name = str(d.get("display_name", f"{d['directory']}/"))
        if depth > 0:
            indent = " " * (4 * depth)
            name = f"`{indent}{base_name}`"
        else:
            name = base_name
        print(
            f"| {name:<30} | {int(d['code']):>4} | "
            f"{int(d['tests']):>4} | {int(d['total']):>4} |"
        )

    print(
        f"| TOTAL (aggregated by directory) | {summary['total_code']:>4} | "
        f"{summary['total_tests']:>4} | {summary['total_all']:>4} |"
    )


def render_json(data: Dict[str, object]) -> None:
    """Write the report dict to stdout as pretty-printed JSON."""
    serialized = json.dumps(data, indent=2)
    print(serialized)


def _normalize_workspace_path(workspace: str) -> str:
    workspace = workspace.strip()
    if workspace in {"", ".", "./"}:
        return "."
    while workspace.startswith("./"):
        workspace = workspace[2:]
    return workspace.rstrip("/")


def _normalize_file_path(name: str) -> str:
    normalized = name.strip()
    while normalized.startswith("./"):
        normalized = normalized[2:]
    return normalized.lstrip("/")


def build_arg_parser() -> argparse.ArgumentParser:
    """Build the CLI parser: positional tmpfile, --format, --workspaces."""
    parser = argparse.ArgumentParser(description="Render LOC summary report.")
    parser.add_argument(
        "tmpfile", help="Temporary file produced by rust-code-analysis"
    )
    format_options = {
        "choices": ["term", "json"],
        "default": "term",
        "help": "Output format (default: term)",
    }
    parser.add_argument("--format", **format_options)
    parser.add_argument(
        "--workspaces",
        nargs="+",
        help="Optional list of workspace paths to filter by",
    )
    return parser


def main() -> None:
    """CLI entry point: parse arguments, build the report, render it."""
    args = build_arg_parser().parse_args()
    report = parse_loc_report(args.tmpfile, workspaces=args.workspaces)
    renderer = render_json if args.format == "json" else render_terminal
    renderer(report)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()