# MCP Agent Toolkit - Root Workspace Makefile
# Pragmatic AI Labs
# https://paiml.com
#
# ⚠️ IMPORTANT: This is a RUST WORKSPACE PROJECT with a SINGLE root Makefile!
#
# WORKSPACE STRUCTURE:
# - Root workspace: Cargo.toml (workspace configuration)
# - Server project: Cargo.toml (main binary crate)
# - Future projects: client/, shared/ (when implemented)
#
# This root Makefile is the SINGLE SOURCE OF TRUTH for all operations:
# - All CI/CD operations (GitHub Actions use this Makefile)
# - Development commands (format, lint, test, build)
# - Installation and deployment
# - Cross-workspace operations
# - Toyota Way Kaizen continuous improvement
#
# No individual project Makefiles exist - everything is consolidated here.
# All server-specific targets are prefixed with `server-` (e.g., `server-build-binary`).
#
# This design eliminates confusion and ensures consistent behavior across all environments.
# Disable built-in implicit rules for faster make execution (bashrs lint compliance)
.SUFFIXES:
# Delete partially-built files on error for safety (bashrs lint compliance)
.DELETE_ON_ERROR:
# Declare all command-style targets as phony so a same-named file can never
# shadow them and they always re-run.
# FIX: the list was split across two physical lines with no backslash
# continuation — the second line was a Make syntax error; restored here.
.PHONY: all validate format lint lint-main check test test-doc test-fast coverage coverage-ci coverage-summary coverage-open coverage-clean clean-coverage build release clean clean-tmp install install-latest reinstall status check-rebuild uninstall help format-scripts lint-scripts check-scripts test-scripts lint-makefile fix validate-docs ci-status validate-naming validate-book context setup audit docs run-mcp run-mcp-test test-actions install-act check-act deps-validate dogfood dogfood-ci update-rust-docs size-report size-track size-check size-compare test-all-interfaces test-feature-all-interfaces test-interface-consistency benchmark-all-interfaces load-test-interfaces context-json context-sarif context-llm context-legacy context-benchmark analyze-top-files analyze-composite analyze-health-dashboard profile-binary-performance profile-deep-context analyze-memory-usage analyze-scaling kaizen test-slow-integration test-safe test-dogfood test-critical-scripts coverage-scripts test-workflow-dag test-workflow-dag-verbose context-root context-simple context-json-root context-benchmark-legacy local-install server-build-binary server-build-docker server-run-mcp server-run-mcp-test server-benchmark server-test server-test-all server-outdated server-tokei build-target cargo-doc cargo-geiger update-deps update-deps-aggressive update-deps-security upgrade-deps audit-fix benchmark coverage-report outdated test-all-features clippy-strict server-build-release create-release test-curl-install cargo-rustdoc install-dev-tools tokei quickstart context-fast clear-swap config-swap overnight-improve overnight-monitor overnight-swap-cron test-unit test-services test-protocols test-e2e test-performance test-property test-property-slow test-all test-stratified coverage-stratified crate-release crate-docs dev commit sprint-close setup-quality quality-gate-full help-toyota-way test-examples examples example clean-quick clean-deep validate-doc-links validate-contracts release-dry \
	release-verify coverage-fast coverage-invalidate coverage-full check-install
# Define sub-projects
# NOTE: client project will be added when implemented
PROJECTS = server

# Default property-based testing parameters (CB-126-D compliance).
# Exported so child cargo/test processes inherit them; `?=` lets callers
# override from the environment or the make command line.
export PROPTEST_CASES ?= 256
export QUICKCHECK_TESTS ?= 100

# Scripts directory path
SCRIPTS_DIR = scripts

# Coverage exclusions: minimal set for honest measurement (CB-125-B compliant).
# Test files, benchmarks, examples, fixtures, binary entry point.
# Network-dependent: mcp modules (require live server connections).
# All other modules use source-level #[coverage(off)] for transparent exclusion.
# FIX: the pattern previously used `\\.rs`. Backslash is NOT an escape
# character in Make variable values, so the shell received a literal double
# backslash and the regex matched "backslash + any char" — the `_test.rs` and
# `main.rs` exclusions could never match a real path. `\.` is the correct
# escaped-dot form.
COVERAGE_EXCLUDE := --ignore-filename-regex='(_tests?\.rs|/(tests|benches|examples|fixtures)/|main\.rs|/mcp[^/]*/|/provable-contracts/)'
# Default target: format and build all projects
all: format build

# Validate everything passes across all projects.
# FIX: the success banner's emoji was mojibake-corrupted and split the echo
# across two physical lines (unterminated quote); rejoined as single lines.
validate: check lint test-fast
	@echo "✅ All projects validated! All checks passed:"
	@echo "  ✓ Type checking (cargo check)"
	@echo "  ✓ Linting (cargo clippy)"
	@echo "  ✓ Fast testing (cargo nextest)"
	@echo "  ✓ Ready for build!"
# Format code in all projects.
# FIX: mojibake-corrupted echo strings that were split across two physical
# lines (unterminated quotes) rejoined; unrecoverable corrupted glyphs dropped.
format:
	@echo "Formatting Rust code..."
	@cargo fmt --manifest-path Cargo.toml
	@echo "✅ Formatting completed successfully!"

# Fix all formatting and linting issues automatically.
fix: format
	@echo "Auto-fixing all formatting and lint issues..."
	@echo "✅ All fixable issues have been resolved!"
# Run linting in all projects
# Production code: no unwrap allowed (use expect instead). Tests: unwrap allowed.
# PMAT_FAST_BUILD=1 skips heavy build.rs operations for faster iteration
# Target: <30 seconds (incremental)
# FIX: mojibake-split echo lines rejoined (they left unterminated quotes).
lint:
	@echo "Linting Rust production code..."
	@PMAT_FAST_BUILD=1 cargo clippy --manifest-path Cargo.toml --lib --bins -- -D warnings
	@echo "✅ All linting checks passed!"

# Lint only main code (skip tests)
lint-main:
	@echo "Linting Rust library and binaries..."
	@PMAT_FAST_BUILD=1 cargo clippy --manifest-path Cargo.toml --lib --bins -- -D warnings -D clippy::cargo -A clippy::multiple-crate-versions -A clippy::uninlined-format-args
	@echo "✅ Main code linting passed!"

# Type check all projects
# Note: --all-features includes "broken-tests" which enables known-broken split test files
# So we check: (1) lib with all features, (2) all targets without broken-tests
check:
	@echo "Type checking Rust code..."
	@cargo check --manifest-path Cargo.toml --all-features
	@cargo check --manifest-path Cargo.toml --all-targets
	@echo "✅ All type checks passed!"
# Fast tests without coverage (optimized for speed) - Test execution MUST complete under 5 minutes
# Following bashrs pattern: cargo-nextest + PROPTEST_CASES + parallel execution
# Toyota Way: cargo-nextest AUTOMATICALLY SKIPS #[ignore] tests by default
# PROPTEST_CASES=5 overrides the exported default (256) for speed.
# RUST_MIN_STACK=33554432 gives tests a 32 MiB stack — presumably for
# deeply-recursive/Clap-heavy tests (test-lib below uses 8 MiB); confirm.
# The -E filter excludes four named tests from the run (nextest filterset).
test-fast: ## Run ALL 20k lib tests via nextest in ~2.5 min
	@echo "โก Running all lib tests via nextest (target: <3 min)..."
	@PMAT_FAST_BUILD=1 PROPTEST_CASES=5 RUST_MIN_STACK=33554432 cargo nextest run \
		--manifest-path Cargo.toml \
		--lib \
		--no-fail-fast \
		-E 'not test(/test_calculator_calculate_current_dir|test_cli_mode_list_templates|test_cli_generate_validation_error|test_cli_search_templates/)'
# Plain `cargo test` variant: one thread per core, skips libsql and CLI
# integration suites; output trimmed to the last 20 lines.
test-lib: ## Run all lib tests (8MB stack for Clap tests)
	@echo "๐งช Running all lib tests..."
	@PROPTEST_CASES=5 RUST_MIN_STACK=8388608 cargo test \
		--manifest-path Cargo.toml \
		--lib \
		-- --test-threads=$$(nproc) \
		--skip libsql \
		--skip cli_integration_tests \
		2>&1 | tail -20
# Run ALL tests (unit + integration) - slower but comprehensive.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-all:
	@echo "Running ALL tests (unit + 171 integration binaries)..."
	@if ! command -v cargo-nextest >/dev/null 2>&1; then \
		echo "Installing cargo-nextest for optimal performance..."; \
		cargo install cargo-nextest || exit 1; \
	fi
	@echo "Compiling all tests (no timeout)..."
	@cargo nextest run --no-run --workspace --features skip-slow-tests --profile fast
	@echo "Running all tests (5-minute timeout)..."
	@timeout 300 cargo nextest run --no-fail-fast --workspace --features skip-slow-tests --profile fast
	@echo "✅ All tests completed!"

# Pre-commit fast tests (type checking only) - Target <30s, allows 60s for build scripts
test-pre-commit-fast:
	@echo "Running pre-commit fast validation (<60s with build scripts)..."
	@echo "   (Type checking only - no test execution)"
	@timeout 60 cargo check --workspace
	@echo "✅ Pre-commit validation completed!"
# Stratified test targets for distributed test architecture.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
# test-unit reserves 2 cores for the system when more than 2 are available.
test-unit:
	@echo "Running unit tests (<10s feedback)..."
	@CORES=$$(nproc) && THREADS=$$((CORES > 2 ? CORES - 2 : 1)) && \
	PROPTEST_CASES=2 cargo test --test unit_core -- --test-threads=$${THREADS}
	@echo "✅ Unit tests completed!"
test-services:
	@echo "Running service integration tests (<30s)..."
	@PROPTEST_CASES=2 cargo test --test services_integration --features integration-tests -- --test-threads=4
	@echo "✅ Service tests completed!"
test-protocols:
	@echo "Running protocol adapter tests (<45s)..."
	@PROPTEST_CASES=2 cargo test --test protocol_adapters --features integration-tests -- --test-threads=2
	@echo "✅ Protocol tests completed!"
test-e2e:
	@echo "Running end-to-end system tests (<120s)..."
	@PROPTEST_CASES=2 cargo test --test e2e_system --features e2e-tests -- --test-threads=1
	@echo "✅ E2E tests completed!"
test-performance:
	@echo "Running performance regression tests..."
	@PROPTEST_CASES=2 cargo test --test performance_regression --features perf-tests -- --test-threads=1
	@echo "✅ Performance tests completed!"
# Property-based tests (fast profile): thread count auto-detected unless
# PROPTEST_THREADS is given; each cargo invocation is time-boxed and a timeout
# is deliberately downgraded to a warning (best-effort, not a hard failure).
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-property:
	@echo "Running property-based tests..."
	@if [ -z "$${PROPTEST_THREADS}" ]; then \
		THREADS=$$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4); \
	else \
		THREADS=$${PROPTEST_THREADS}; \
	fi && \
	echo "  Running all property test modules with $${THREADS} threads..." && \
	echo "  (Override with PROPTEST_THREADS=n make test-property)" && \
	echo "  Note: Slow cache tests are skipped. Run 'make test-property-slow' to include them." && \
	PROPTEST_CASES=2 timeout 180 cargo test --manifest-path Cargo.toml --lib -- property_tests --test-threads=$${THREADS} || echo "⚠️ Some property tests timed out after 3 minutes" && \
	PROPTEST_CASES=2 timeout 60 cargo test --manifest-path Cargo.toml --lib -- prop_ --test-threads=$${THREADS} || echo "⚠️ Some prop tests timed out"
	@echo "✅ Property tests completed!"

# Run property tests including slow ones (no timeouts, --include-ignored).
test-property-slow:
	@echo "Running ALL property-based tests (including slow ones)..."
	@if [ -z "$${PROPTEST_THREADS}" ]; then \
		THREADS=$$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4); \
	else \
		THREADS=$${PROPTEST_THREADS}; \
	fi && \
	echo "  Running with $${THREADS} threads..." && \
	PROPTEST_CASES=2 cargo test --manifest-path Cargo.toml --lib -- property_tests --test-threads=$${THREADS} --include-ignored && \
	PROPTEST_CASES=2 cargo test --manifest-path Cargo.toml --lib -- prop_ --test-threads=$${THREADS} --include-ignored && \
	PROPTEST_CASES=2 cargo test --manifest-path Cargo.toml --test refactor_auto_property_integration -- --test-threads=$${THREADS}
	@echo "✅ All property tests completed (including slow tests)!"
# ==============================================================================
# Mutation Testing (Dual Strategy: PMAT + cargo-mutants)
# ==============================================================================
# Sprint 60: Enhanced Coverage via Mutation Testing
# Documentation: docs/sprints/SPRINT-60-DUAL-MUTATION-STRATEGY.md
#
# PMAT Mutation Testing: Fast AST-based, multi-language, ML-powered
# cargo-mutants: Industry standard Rust validation
# ==============================================================================
# Quick PMAT mutation test (high-value targets, daily use).
# FIX: mojibake-split success echoes rejoined; added the missing
# `mkdir -p mutation_results` (sibling targets create it, this one wrote into
# it without ensuring it exists).
test-mutation-pmat-quick:
	@echo "Running PMAT mutation testing (quick mode)..."
	@echo "   Target: High-value security-critical modules"
	@if ! command -v pmat >/dev/null 2>&1; then \
		echo "❌ PMAT binary not found. Build with 'make release' first."; \
		exit 1; \
	fi
	@mkdir -p mutation_results
	@echo "   Testing path_validator.rs (security-critical)..."
	@./target/release/pmat analyze mutation \
		--file src/utils/path_validator.rs \
		--timeout 60 \
		--format json \
		--output mutation_results/pmat_path_validator.json || true
	@echo "   Testing calculator.rs (TDG business logic)..."
	@./target/release/pmat analyze mutation \
		--file src/quality/calculator.rs \
		--timeout 60 \
		--format json \
		--output mutation_results/pmat_calculator.json || true
	@echo "✅ PMAT quick mutation tests completed!"
	@echo "Results: mutation_results/pmat_*.json"

# Full PMAT mutation test (all modules, weekly/pre-release)
test-mutation-pmat-full:
	@echo "Running PMAT mutation testing (full mode)..."
	@echo "   Target: All server modules with ML prioritization"
	@if ! command -v pmat >/dev/null 2>&1; then \
		echo "❌ PMAT binary not found. Build with 'make release' first."; \
		exit 1; \
	fi
	@mkdir -p mutation_results
	@WORKERS=$$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) && \
	echo "   Using $${WORKERS} workers for distributed execution..." && \
	./target/release/pmat analyze mutation \
		--path src/ \
		--workers "$${WORKERS}" \
		--ml-prioritize \
		--timeout 300 \
		--format json \
		--output mutation_results/pmat_full_report.json
	@echo "✅ PMAT full mutation testing completed!"
	@echo "Results: mutation_results/pmat_full_report.json"
# Quick cargo-mutants test (validation, daily use).
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-mutation-cargo-quick:
	@echo "Running cargo-mutants (quick mode)..."
	@echo "   Target: High-value security-critical modules"
	@if ! command -v cargo-mutants >/dev/null 2>&1; then \
		echo "Installing cargo-mutants..."; \
		cargo install cargo-mutants || exit 1; \
	fi
	@mkdir -p mutation_results
	@echo "   Testing path_validator.rs..."
	@cargo mutants \
		--manifest-path Cargo.toml \
		--file src/utils/path_validator.rs \
		--timeout 60 \
		--output mutation_results/cargo_path_validator.txt || true
	@echo "   Testing calculator.rs..."
	@cargo mutants \
		--manifest-path Cargo.toml \
		--file src/quality/calculator.rs \
		--timeout 60 \
		--output mutation_results/cargo_calculator.txt || true
	@echo "✅ cargo-mutants quick tests completed!"
	@echo "Results: mutation_results/cargo_*.txt"

# Full cargo-mutants test (all modules, weekly/pre-release)
test-mutation-cargo-full:
	@echo "Running cargo-mutants (full workspace mode)..."
	@echo "   Target: All Rust source files in workspace"
	@if ! command -v cargo-mutants >/dev/null 2>&1; then \
		echo "Installing cargo-mutants..."; \
		cargo install cargo-mutants || exit 1; \
	fi
	@mkdir -p mutation_results
	@JOBS=$$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) && \
	echo "   Using $${JOBS} parallel jobs..." && \
	cargo mutants \
		--manifest-path Cargo.toml \
		--workspace \
		--timeout 120 \
		--jobs "$${JOBS}" \
		--output mutation_results/cargo_full_report.txt
	@echo "✅ cargo-mutants full testing completed!"
	@echo "Results: mutation_results/cargo_full_report.txt"
# Dual mutation testing (run both PMAT and cargo-mutants, compare results).
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-mutation-dual:
	@echo "Running dual mutation testing strategy..."
	@echo "   Running PMAT mutation testing..."
	@$(MAKE) test-mutation-pmat-quick
	@echo ""
	@echo "   Running cargo-mutants validation..."
	@$(MAKE) test-mutation-cargo-quick
	@echo ""
	@echo "Comparing results..."
	@if [ -f scripts/compare_mutation_results.sh ]; then \
		bash scripts/compare_mutation_results.sh; \
	else \
		echo "⚠️ Comparison script not found. Install with:"; \
		echo "   See docs/sprints/SPRINT-60-DUAL-MUTATION-STRATEGY.md"; \
	fi
	@echo "✅ Dual mutation testing completed!"

# CI mutation testing (5-minute budget, critical modules only)
test-mutation-ci:
	@echo "Running CI mutation testing (5-minute budget)..."
	@echo "   Target: Critical security and business logic modules"
	@mkdir -p mutation_results
	@if ! command -v pmat >/dev/null 2>&1; then \
		echo "❌ PMAT binary not found. Build with 'make release' first."; \
		exit 1; \
	fi
	@echo "   Testing path_validator.rs (30s timeout)..."
	@timeout 30 ./target/release/pmat analyze mutation \
		--file src/utils/path_validator.rs \
		--timeout 30 \
		--format json \
		--output mutation_results/ci_path_validator.json || echo "⚠️ Timed out"
	@echo "   Testing calculator.rs (30s timeout)..."
	@timeout 30 ./target/release/pmat analyze mutation \
		--file src/quality/calculator.rs \
		--timeout 30 \
		--format json \
		--output mutation_results/ci_calculator.json || echo "⚠️ Timed out"
	@echo "✅ CI mutation testing completed!"
	@echo "Results: mutation_results/ci_*.json"
# Mutation score summary (parse JSON reports; requires jq for PMAT results).
# FIX: mojibake-split success echo rejoined (unterminated quote).
test-mutation-summary:
	@echo "Mutation Testing Summary"
	@echo "======================================"
	@if [ -d mutation_results ]; then \
		echo "PMAT Results:"; \
		for file in mutation_results/pmat_*.json; do \
			if [ -f "$$file" ]; then \
				echo "  $$file:"; \
				jq -r '.summary // {total_mutants: 0, caught: 0, missed: 0, timeout: 0, score: 0} | "    Total: \(.total_mutants) | Caught: \(.caught) | Missed: \(.missed) | Score: \(.score)%"' "$$file" 2>/dev/null || echo "    (parsing failed)"; \
			fi; \
		done; \
		echo ""; \
		echo "cargo-mutants Results:"; \
		for file in mutation_results/cargo_*.txt; do \
			if [ -f "$$file" ]; then \
				echo "  $$file:"; \
				grep -E "caught|missed|timeout|score" "$$file" 2>/dev/null | head -5 || echo "    (no summary found)"; \
			fi; \
		done; \
	else \
		echo "❌ No mutation results found. Run 'make test-mutation-dual' first."; \
	fi
	@echo "======================================"

# Clean mutation testing artifacts
test-mutation-clean:
	@echo "Cleaning mutation testing artifacts..."
	@rm -rf mutation_results/
	@rm -rf mutants.out/ mutants.out.old/
	@echo "✅ Mutation artifacts cleaned!"

.PHONY: test-mutation-pmat-quick test-mutation-pmat-full \
	test-mutation-cargo-quick test-mutation-cargo-full \
	test-mutation-dual test-mutation-ci \
	test-mutation-summary test-mutation-clean
# Run all stratified tests in parallel (-j4 fans out the four sub-targets).
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-stratified:
	@echo "Running all stratified tests in parallel..."
	@$(MAKE) -j4 test-unit test-services test-protocols test-e2e
	@echo "✅ All stratified tests completed!"

# Alias for coverage (kept for compatibility)
coverage-stratified: coverage

# Slow integration tests (run separately, not part of fast coverage)
test-slow-integration:
	@echo "Running slow integration tests with timeouts..."
	@echo "⚠️ These tests may take 5-10 minutes and are not part of fast coverage"
	@PROPTEST_CASES=2 cargo test --test slow_integration --release -- --test-threads=1 --ignored
	@echo "✅ Slow integration tests completed!"

# Test with manual thread control - use when automatic detection isn't working
test-safe:
	@echo "Running tests with manual thread control..."
	@THREADS=$${THREADS:-4} && \
	echo "Using $${THREADS} threads (override with THREADS=n make test-safe)" && \
	PROPTEST_CASES=2 SKIP_SLOW_TESTS=1 RUST_TEST_THREADS=$${THREADS} cargo test --release --workspace --exclude slow_integration -- --test-threads=$${THREADS}
	@echo "✅ Safe test run completed!"
# Run tests - ALWAYS FAST (zero tolerance for slow tests) with coverage summary
# Run all examples; an example that fails because it needs an optional cargo
# feature is skipped, any other failure aborts the loop.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-examples:
	@echo "Running all cargo examples..."
	@for example in examples/*.rs; do \
		if [ -f "$$example" ]; then \
			example_name=$$(basename "$$example" .rs); \
			echo "   Running example: $$example_name"; \
			output=$$(cargo run --example "$$example_name" --quiet 2>&1) || { \
				if echo "$$output" | grep -q "requires the features"; then \
					echo "   ⏭️ Example $$example_name skipped (requires optional feature)"; \
				else \
					echo "$$output"; \
					echo "   ❌ Example $$example_name failed"; \
					exit 1; \
				fi \
			}; \
		fi \
	done
	@echo "✅ All examples completed successfully!"

# Alias for running examples (cargo run --example)
examples: test-examples

# Run a specific example: make example NAME=complexity_demo
# FIX: guard against a missing NAME so the failure mode is a clear usage
# message instead of a confusing bare `cargo run --example` error.
example:
	@if [ -z "$(NAME)" ]; then \
		echo "❌ Usage: make example NAME=<example_name>"; \
		exit 1; \
	fi
	@cargo run --example $(NAME)

# Main test target - runs all required tests
test: test-fast test-doc test-property test-examples
	@echo "✅ All tests completed successfully!"
# Run doctests only.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
test-doc:
	@echo "Running doctests..."
	@PROPTEST_CASES=2 cargo test --doc --manifest-path Cargo.toml
	@echo "✅ Doctests completed!"

# =============================================================================
# INTEGRATION TESTS: CLI handlers, dispatchers (excluded from coverage)
# =============================================================================
# These test the "thin shim" CLI layer that's excluded from coverage measurement.
# Run separately to keep `make coverage` fast while ensuring handlers work.
# =============================================================================
test-integration: ## Run integration tests for CLI handlers (<60s, excluded from coverage)
	@echo "Running CLI integration tests (<60s target)..."
	@echo "   Tests handlers/dispatchers excluded from coverage measurement"
	@PROPTEST_CASES=2 RUST_MIN_STACK=33554432 cargo test --lib -- \
		cli::handlers::complexity_handlers::tests \
		cli::handlers::analysis_handlers::tests \
		command_dispatcher::tests \
		--test-threads=$$(nproc) \
		2>&1 | tail -10
	@echo "✅ Integration tests completed!"

# Pre-push validation (called by git hook)
pre-push: test-integration
	@echo "✅ Pre-push validation passed"

# Full CI target (coverage + integration)
ci-full: coverage test-integration
	@echo "✅ Full CI validation passed"
# =============================================================================
# COVERAGE: nightly + cargo test (1 profraw/binary, no merge needed)
# =============================================================================
# Pattern: clean -> test+report -> summary -> threshold check
# Uses cargo test (1 profraw/binary) = fast, no profraw merge step
# Nightly required for #[coverage(off)] attribute on test modules
# =============================================================================
COV_THRESHOLD ?= 95
# FIX 1: mojibake-split echo inside the continued shell command rejoined.
# FIX 2: when COV_PCT could not be parsed (empty), the old logic fell through
# to the success branch and reported "Coverage % meets threshold" — a parse
# failure now fails the target instead of silently passing.
coverage: ## Coverage summary + threshold check (<5 min)
	@echo "Running coverage ($(COV_THRESHOLD)%+ threshold)..."
	@which cargo-llvm-cov > /dev/null 2>&1 || { cargo install cargo-llvm-cov --locked || exit 1; }
	@mkdir -p target/coverage .pmat-metrics
	@date +%s%3N > .pmat-metrics/coverage.start
	@cargo +nightly llvm-cov clean --workspace
	@echo "Running tests with instrumentation..."
	@env RUSTC_WRAPPER= PROPTEST_CASES=2 RUST_MIN_STACK=33554432 cargo +nightly llvm-cov test \
		--lib \
		--features all-languages \
		$(COVERAGE_EXCLUDE) \
		-- --test-threads=$$(nproc) \
		--skip libsql \
		--skip cli_integration_tests \
		|| true
	@echo "Generating report..."
	@cargo +nightly llvm-cov report --summary-only $(COVERAGE_EXCLUDE) | tee target/coverage/summary.txt | grep -E "^TOTAL"
	@./scripts/record-metric.sh coverage
	@COV_PCT=$$(grep -E '^TOTAL' target/coverage/summary.txt | awk '{n=0; for(i=1;i<=NF;i++){if($$i ~ /[0-9]+\.[0-9]+%/){n++; if(n==3){gsub(/%/,"",$$i);print $$i;exit}}}}'); \
	if [ -z "$$COV_PCT" ]; then \
		echo "❌ Could not parse coverage percentage from target/coverage/summary.txt"; \
		exit 1; \
	elif [ $$(echo "$$COV_PCT < $(COV_THRESHOLD)" | bc -l) -eq 1 ]; then \
		echo "❌ Coverage $${COV_PCT}% is below threshold $(COV_THRESHOLD)%"; \
		exit 1; \
	else \
		echo "✅ Coverage $${COV_PCT}% meets threshold $(COV_THRESHOLD)%"; \
	fi
# Re-generate an HTML report from the profdata left by the last coverage run;
# does not re-run any tests.
coverage-html: ## Generate HTML report from last coverage run
	@echo "๐ Generating HTML report..."
	@mkdir -p target/coverage
	@cargo +nightly llvm-cov report --html --output-dir target/coverage/html $(COVERAGE_EXCLUDE)
	@echo "๐ HTML: target/coverage/html/index.html"
# LCOV output for CI upload; lib tests only, stable toolchain (no +nightly).
coverage-ci: ## Generate LCOV report for CI (fast mode, --lib only)
	@echo "๐ Running CI coverage (--lib only)..."
	@echo "   - Uses 'cargo test' (1 profraw/binary) for fast merge"
	@env RUSTC_WRAPPER= PROPTEST_CASES=2 \
		cargo llvm-cov test \
		--lib \
		--manifest-path Cargo.toml \
		--lcov --output-path lcov.info \
		$(COVERAGE_EXCLUDE) \
		-- --test-threads=$$(nproc) 2>&1 | tail -20
	@echo "โ Coverage report: lcov.info"
coverage-summary: ## Show coverage summary
	@cargo llvm-cov report --summary-only $(COVERAGE_EXCLUDE) 2>/dev/null || echo "Run 'make coverage' first"
# Try xdg-open (Linux) then open (macOS), else print the path.
coverage-open: ## Open HTML coverage report in browser
	@if [ -f target/coverage/html/index.html ]; then \
		xdg-open target/coverage/html/index.html 2>/dev/null || \
		open target/coverage/html/index.html 2>/dev/null || \
		echo "Open: target/coverage/html/index.html"; \
	else \
		echo "โ Run 'make coverage' first"; \
	fi
coverage-clean: ## Clean coverage artifacts
	@rm -f lcov.info target/coverage/lcov.info
	@rm -rf target/coverage
	@echo "โ Coverage artifacts cleaned"
clean-coverage: coverage-clean ## Alias for coverage-clean
# bashrs-style O(1) cached coverage check (target: <30ms cache hit)
# Uses git tree hash for O(1) lookup, falls back to file hashing if not in git
# NOTE(review): the recipe below does not actually implement the cached lookup
# described above — it always runs the tests; the caching comment appears to
# describe a planned/former design. Confirm before relying on it.
coverage-fast: ## Fast coverage with cargo test (~2-3 min)
	@echo "โก Running fast coverage (lib tests only)..."
	@echo "   - Uses 'cargo test' (1 profraw/binary) NOT 'nextest' (1 profraw/test)"
	@echo "   - This reduces 15K profraw files to ~5 files = fast merge"
	@cargo llvm-cov clean --workspace 2>/dev/null || true
	@env PROPTEST_CASES=3 QUICKCHECK_TESTS=3 \
		cargo llvm-cov test --lib \
		--no-report \
		-- --test-threads=$$(nproc) \
		--skip stress --skip fuzz --skip property --skip benchmark \
		--skip slow --skip integration --skip e2e --skip comprehensive \
		--skip libsql 2>&1 | tail -30
	@echo "๐ Generating coverage report..."
	@cargo llvm-cov report --summary-only $(COVERAGE_EXCLUDE)
	@echo "โก Fast coverage complete"
# Drops the metrics cache directory so the next coverage run starts cold.
coverage-invalidate: ## Invalidate coverage cache
	@rm -rf .pmat-metrics/coverage
	@echo "โ Coverage cache invalidated"
# Quick coverage for fast feedback - bashrs-style (~2-3 min, core tests only)
# Uses --lib and excludes slow tests for maximum speed
# NOTE: Uses 'cargo llvm-cov test' instead of nextest to avoid CB-127-A profraw explosion
coverage-quick: ## Quick coverage for fast feedback (~2-3 min, core only)
	@echo "โก Quick coverage (core library tests only)..."
	@which cargo-llvm-cov > /dev/null 2>&1 || { cargo install cargo-llvm-cov --locked || exit 1; }
	@env PROPTEST_CASES=3 QUICKCHECK_TESTS=3 \
		cargo llvm-cov test \
		--lib \
		$(COVERAGE_EXCLUDE) \
		-- --test-threads=$$(nproc) \
		--skip stress --skip fuzz --skip property --skip benchmark \
		--skip slow --skip integration --skip e2e --skip comprehensive
	@cargo llvm-cov report --summary-only $(COVERAGE_EXCLUDE)
	@echo ""
	@echo "โก Quick coverage complete (use 'make coverage' for full report)"
# Full coverage including ignored tests (for CI/nightly, NOT pre-commit)
# Estimated: ~75-80% coverage vs ~60% fast coverage
# NOTE: Uses 'cargo llvm-cov test' instead of nextest to avoid CB-127-A profraw explosion
# PROPTEST_CASES=25 here (vs 2-3 elsewhere): fuller property exploration for
# the nightly run, at the cost of the 30+ minute runtime warned about below.
coverage-full: ## Full coverage including slow tests (CI/nightly only)
	@echo "๐ Running FULL coverage (including ignored tests)..."
	@echo "โ ๏ธ This takes 30+ minutes - use coverage-fast for dev workflow"
	@env PROPTEST_CASES=25 QUICKCHECK_TESTS=25 cargo llvm-cov test \
		--lib \
		$(COVERAGE_EXCLUDE) \
		-- --test-threads=$$(nproc) --include-ignored
	@cargo llvm-cov report --summary-only $(COVERAGE_EXCLUDE)
	@echo ""
	@echo "๐ Full coverage complete (including slow/ignored tests)"
# Run security audit on all projects.
# FIX 1: mojibake-split success echo rejoined (unterminated quote).
# FIX 2: the old recipe ran `cd $(PWD)/../$(notdir $(PWD))` — i.e. "go to the
# parent, then back into this directory's own basename" — which resolves to
# the directory make is already running in. The no-op cd is removed; cargo
# audit already runs in the workspace root.
audit:
	@echo "Running security audit..."
	@cargo audit
	@echo "✅ Security audit completed"

# Generate documentation
docs:
	@echo "Generating documentation..."
	@cargo doc --manifest-path Cargo.toml --all-features --no-deps --open
# Dogfood our own tools to keep README.md updated.
# NOTE: $(shell date +%Y-%m-%d) is expanded once at makefile parse time, so
# all four artifacts in one run share the same date stamp.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
dogfood: release
	@echo "Dogfooding: Using our own MCP toolkit extensively for analysis and documentation..."
	@echo "Phase 1: Comprehensive analysis using the built binary..."
	@mkdir -p artifacts/dogfooding
	@./target/release/pmat analyze complexity --top-files 10 --format json > artifacts/dogfooding/complexity-$(shell date +%Y-%m-%d).json
	@./target/release/pmat analyze churn --days 30 --top-files 10 --format json > artifacts/dogfooding/churn-$(shell date +%Y-%m-%d).json
	@./target/release/pmat analyze dag --enhanced --top-files 15 -o artifacts/dogfooding/dag-$(shell date +%Y-%m-%d).mmd
	@./target/release/pmat context --format markdown --output artifacts/dogfooding/deep-context-$(shell date +%Y-%m-%d).md
	@echo "Phase 2: Updating documentation with binary-generated metrics..."
	@deno run --allow-all scripts/dogfood-readme.ts
	@echo ""
	@echo "✅ Dogfooding complete! README.md updated with fresh binary-generated metrics."
	@echo "Check artifacts/dogfooding/ for comprehensive analysis reports"
	@echo "All analysis performed using our own built binary"
	@echo "Tip: Run 'git diff README.md' to see what changed"

# Quick dogfood for CI - comprehensive binary testing and metrics
dogfood-ci: release
	@echo "CI Dogfooding: Comprehensive testing of our own binary..."
	@mkdir -p artifacts/dogfooding
	@echo "Generating comprehensive analysis using built binary..."
	@./target/release/pmat analyze complexity --top-files 10 --format json > artifacts/dogfooding/complexity-latest.json
	@./target/release/pmat analyze churn --days 7 --top-files 10 --format json > artifacts/dogfooding/churn-latest.json
	@./target/release/pmat analyze dag --enhanced --top-files 15 -o artifacts/dogfooding/dag-latest.mmd
	@./target/release/pmat context --format json --output artifacts/dogfooding/deep-context-latest.json
	@echo "Testing binary performance and interface consistency..."
	@time ./target/release/pmat analyze complexity --top-files 5 --format table
	@echo "✅ CI dogfooding complete! All metrics generated using our own binary."
	@echo "Comprehensive reports saved to artifacts/dogfooding/"
	@echo "Binary performance validated"

# Update rust-docs with current metrics
update-rust-docs: release
	@echo "Updating rust-docs with current metrics..."
	@deno run --allow-all scripts/update-rust-docs.ts
	@echo "✅ rust-docs updated successfully!"
# Run MCP server
run-mcp:
	@echo "Starting MCP server..."
	@cargo run --release --manifest-path Cargo.toml

# Run MCP server in test mode
run-mcp-test:
	@echo "Starting MCP server in test mode..."
	@cargo run --release --manifest-path Cargo.toml -- --test

# Build all projects (binaries only - no Docker).
# Doc-update steps are best-effort (|| true) so a docs failure never blocks
# the build itself.
# FIX: mojibake-split success echo rejoined (unterminated quote).
build: validate-docs validate-naming validate-book
	@echo "Building server binary..."
	@cargo build --manifest-path Cargo.toml
	@echo ""
	@echo "Updating documentation with current metrics..."
	@echo "   - Updating rust-docs..."
	@$(MAKE) update-rust-docs || true
	@echo "   - Updating README.md..."
	@$(MAKE) dogfood || true
	@echo ""
	@echo "✅ Build completed successfully!"
	@echo "   Server binary built (Docker NOT built)."
	@echo "   Documentation updated with latest metrics."
	@echo "   To build Docker: make server-build-docker"
# Clean all projects.
# FIX: mojibake-split success echoes rejoined (unterminated quotes).
clean:
	@echo "Cleaning build artifacts..."
	@cargo clean --manifest-path Cargo.toml
	@rm -rf coverage/ artifacts/ target/
	@echo "✅ Clean completed successfully!"

# Quick clean - just this package and incremental
clean-quick:
	@echo "Quick clean (package and incremental only)..."
	@cargo clean -p pmat --manifest-path Cargo.toml
	@rm -rf target/debug/incremental
	@rm -rf target/release/incremental
	@echo "✅ Quick clean completed!"

# Deep clean - including cargo caches.
# WARNING: this deletes the user-wide ~/.cargo registry cache and git
# checkouts, affecting every Rust project on the machine, not just this one.
clean-deep: clean
	@echo "Deep cleaning including cargo caches..."
	@rm -rf ~/.cargo/registry/cache/*
	@rm -rf ~/.cargo/git/checkouts/*
	@rm -rf target/
	@echo "Cargo cache size after cleaning:"
	@du -sh ~/.cargo/registry/ ~/.cargo/git/ 2>/dev/null || true
	@echo "✅ Deep clean completed!"
# Clean /tmp aggressively - remove most temporary files.
# $(shell whoami) is evaluated at makefile parse time (the invoking user).
# Every destructive find is best-effort (|| true) since other processes may
# hold files in /tmp.
# FIX: mojibake-split success echo rejoined (unterminated quote).
clean-tmp:
	@echo "Aggressively cleaning /tmp..."
	@echo "/tmp usage before cleanup:"
	@df -h /tmp
	@echo ""
	@echo "Removing ALL temporary files and directories (preserving system essential files)..."
	@# Remove all user-owned files first
	@find /tmp -user $(shell whoami) -delete 2>/dev/null || true
	@echo "Removing compilation artifacts (all users)..."
	@if command -v sudo >/dev/null 2>&1; then \
		sudo find /tmp -name "cc*" -type f -delete 2>/dev/null || true; \
		sudo find /tmp -name "rust*" -delete 2>/dev/null || true; \
		sudo find /tmp -name "*cargo*" -delete 2>/dev/null || true; \
		sudo find /tmp -name "tmp*" -type f -delete 2>/dev/null || true; \
		sudo find /tmp -name "*.profraw" -delete 2>/dev/null || true; \
		sudo find /tmp -name "*.profdata" -delete 2>/dev/null || true; \
		sudo find /tmp -name "*.tmp" -delete 2>/dev/null || true; \
		sudo find /tmp -name "*.temp" -delete 2>/dev/null || true; \
		echo "Removing old files (older than 1 hour)..."; \
		sudo find /tmp -type f -amin +60 -delete 2>/dev/null || true; \
		echo "Removing empty directories..."; \
		sudo find /tmp -type d -empty -delete 2>/dev/null || true; \
	else \
		echo "⚠️ No sudo available - only cleaned user files"; \
	fi
	@echo ""
	@echo "/tmp usage after cleanup:"
	@df -h /tmp
	@echo "✅ Aggressive /tmp cleanup completed!"
# Clear swap memory (useful between test runs to prevent swap buildup).
# Only acts when swap is actually in use; requires sudo for sync/drop_caches
# and swapoff/swapon.
# FIX: mojibake-split echoes inside the continued shell command rejoined
# (they broke the line continuation and quoting).
clear-swap:
	@echo "Clearing swap memory..."
	@if command -v sudo >/dev/null 2>&1; then \
		SWAP_USED=$$(free -b | grep Swap | awk '{print $$3}'); \
		SWAP_TOTAL=$$(free -b | grep Swap | awk '{print $$2}'); \
		if [ $$SWAP_USED -gt 0 ]; then \
			echo "Swap status: $$(free -h | grep Swap)"; \
			SWAP_PERCENT=$$((SWAP_USED * 100 / SWAP_TOTAL)); \
			echo "Swap usage: $$SWAP_PERCENT% ($$(free -h | grep Swap | awk '{print $$3}') used), clearing..."; \
			echo "Syncing filesystems..."; \
			sudo sync; \
			echo "Dropping caches..."; \
			sudo sh -c "echo 3 > /proc/sys/vm/drop_caches" 2>/dev/null || true; \
			echo "Resetting swap..."; \
			sudo swapoff -a && sudo swapon -a 2>/dev/null || true; \
			echo "✅ Swap cleared!"; \
			echo "New swap status: $$(free -h | grep Swap)"; \
		else \
			echo "✅ No swap in use"; \
		fi; \
	else \
		echo "⚠️ sudo not available - cannot clear swap"; \
	fi
# Configure swap size (increase from 512MB to 8GB)
config-swap:
@echo "โ๏ธ Configuring swap size to 8GB..."
@if [ -f "$(SCRIPTS_DIR)/config-swap.ts" ]; then \
echo "๐ Running swap configuration script..."; \
echo " This will:"; \
echo " โข Disable current swap"; \
echo " โข Create new 8GB swapfile"; \
echo " โข Set swappiness to 10"; \
echo " โข Make changes permanent"; \
echo ""; \
echo "โ ๏ธ This requires sudo privileges"; \
sudo "$$(command -v deno)" run --allow-run --allow-read --allow-write $(SCRIPTS_DIR)/config-swap.ts; \
else \
echo "โ Swap configuration script not found at $(SCRIPTS_DIR)/config-swap.ts"; \
echo " Please ensure the script exists before running this target."; \
exit 1; \
fi
# Format TypeScript scripts (excluding archived scripts)
# NOTE(review): deno failures are masked (`2>/dev/null || echo ...`), so a
# real formatter error is indistinguishable from "deno not installed".
format-scripts:
@echo "๐ Formatting TypeScript scripts (excluding archive)..."
@if [ -d "$(SCRIPTS_DIR)" ]; then \
if [ "$$(find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' 2>/dev/null | wc -l)" -gt 0 ]; then \
find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' -exec deno fmt --quiet "{}" + 2>/dev/null || echo "โ No TypeScript files found or deno not available"; \
else \
echo "โ No TypeScript scripts to format"; \
fi \
else \
echo "โ Scripts directory not found"; \
fi
# Lint TypeScript scripts (includes type checking, excluding archived scripts)
# Runs `deno lint` then `deno check` over the same non-archived file set.
lint-scripts:
@echo "๐ Linting TypeScript scripts (excluding archive)..."
@if [ -d "$(SCRIPTS_DIR)" ]; then \
if [ "$$(find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' 2>/dev/null | wc -l)" -gt 0 ]; then \
find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' -exec deno lint --quiet "{}" + 2>/dev/null || echo "โ No TypeScript files found or deno not available"; \
echo "โ
Type checking TypeScript scripts (excluding archive)..."; \
find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' -exec deno check "{}" + 2>/dev/null || echo "โ No TypeScript files found or deno not available"; \
else \
echo "โ No TypeScript scripts to lint"; \
fi \
else \
echo "โ Scripts directory not found"; \
fi
# Type check TypeScript scripts (excluding archived scripts)
# Same traversal as lint-scripts but type-checking only (no lint pass).
check-scripts:
@echo "โ
Type checking TypeScript scripts (excluding archive)..."
@if [ -d "$(SCRIPTS_DIR)" ]; then \
if [ "$$(find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' 2>/dev/null | wc -l)" -gt 0 ]; then \
find $(SCRIPTS_DIR) -name '*.ts' -type f -not -path '*/archive/*' -exec deno check "{}" + 2>/dev/null || echo "โ No TypeScript files found or deno not available"; \
else \
echo "โ No TypeScript scripts to check"; \
fi \
else \
echo "โ Scripts directory not found"; \
fi
# Lint Makefile (silent mode - only shows summary)
# NOTE: The linter reports many false positive warnings for shell syntax.
# bashrs warnings are non-blocking as they include many style suggestions.
# Use 'cat /tmp/bashrs-makefile.log' for details.
# FIX: the warning count previously used `grep -c ... || echo "0"`. When the
# log exists but contains no matches, `grep -c` prints "0" AND exits 1, so the
# fallback echo appended a second "0" ("0\n0"), making the `!= "0"` test true
# and dumping the log even with zero warnings. We now capture grep's own
# count and only default to 0 when the capture is empty (log missing).
lint-makefile:
@echo "๐ Linting Makefile..."
@if [ -f ./target/release/pmat ]; then \
output="$$(./target/release/pmat analyze makefile Makefile --format human 2>&1)"; \
violations=$$(printf "%s\n" "$${output}" | grep -o "[0-9]* violations" | head -1 || echo "0 violations"); \
echo " pmat: $${violations}"; \
else \
echo " pmat: skipped (release binary not found)"; \
fi
@if command -v bashrs >/dev/null 2>&1; then \
bashrs lint Makefile --ignore MAKE003,MAKE006,MAKE010,MAKE012,MAKE017,MAKE018 > /tmp/bashrs-makefile.log 2>&1 || true; \
warnings=$$(grep -c "\[warning\]" /tmp/bashrs-makefile.log 2>/dev/null); \
[ -n "$${warnings}" ] || warnings=0; \
echo " bashrs: $${warnings} actionable warnings (see .bashrsignore for intentional suppressions)"; \
if [ "$${warnings}" != "0" ]; then \
cat /tmp/bashrs-makefile.log; \
fi; \
else \
echo " bashrs: skipped (not installed)"; \
fi
@echo "โ
Makefile linting complete!"
# Test TypeScript scripts with coverage
# NOTE(review): the glob arguments are quoted, so /bin/sh does not expand
# them — this relies on deno expanding globs itself; TODO confirm against the
# pinned Deno version.
test-scripts:
@rm -rf coverage_deno
@if [ -d "$(SCRIPTS_DIR)" ] && [ "$$(find "$(SCRIPTS_DIR)" -name '*.test.ts' -type f 2>/dev/null | wc -l)" -gt 0 ]; then \
echo "๐งช Testing TypeScript scripts with coverage..."; \
deno test --allow-all --coverage=coverage_deno \
"$(SCRIPTS_DIR)/lib/*.test.ts" \
"$(SCRIPTS_DIR)/*.test.ts"; \
echo ""; \
echo "๐ Coverage Report:"; \
deno coverage coverage_deno; \
else \
echo "โ No TypeScript script tests found"; \
fi
# Test dogfood integration (requires built binary)
# Depends on `release` so the self-analysis exercises the optimized binary.
test-dogfood: release
@echo "๐ Testing dogfood integration (self-analysis capabilities)..."
@echo "This test verifies our tool can analyze itself and generate valid Mermaid diagrams"
@echo ""
@deno test --allow-all scripts/dogfood-readme-integration.test.ts
@echo ""
@echo "โ
Dogfood integration tests complete!"
# Test critical Deno scripts with coverage
# P0 gate: syntax-checks install.sh, then runs the release/install utility
# tests under coverage. The final grep is advisory (`|| true`), so the 80%
# target is informational, not enforced.
test-critical-scripts:
@echo "๐งช Testing critical Deno scripts with coverage..."
@echo "These scripts are P0 - if they fail, installation/releases break!"
@echo ""
@echo "Testing install.sh wrapper..."
@bash -n $(SCRIPTS_DIR)/install.sh || (echo "โ install.sh has syntax errors!" && exit 1)
@echo "โ
install.sh syntax check passed"
@echo ""
@echo "Testing TypeScript utility modules..."
@rm -rf .coverage
@deno test --coverage=.coverage --allow-read --allow-env --allow-write --allow-run \
$(SCRIPTS_DIR)/lib/create-release-utils.test.ts \
$(SCRIPTS_DIR)/lib/install-utils.test.ts \
$(SCRIPTS_DIR)/lib/create-release-utils-integration.test.ts
@echo ""
@echo "Generating coverage report..."
@deno coverage .coverage --lcov --output=.coverage/lcov.info
@deno coverage .coverage
@echo ""
@echo "Checking coverage thresholds..."
@echo "Target: 80% coverage for critical scripts"
@deno coverage .coverage | grep -E "^All files" || true
@echo ""
@echo "โ
Critical script tests completed!"
# Generate Deno coverage report
# NOTE(review): the unquoted `**` glob is passed to /bin/sh, which has no
# globstar — again relies on deno's own glob expansion; TODO confirm.
coverage-scripts:
@rm -rf coverage_deno
@if [ -d "$(SCRIPTS_DIR)" ] && [ "$$(find $(SCRIPTS_DIR) -name '*.test.ts' -type f 2>/dev/null | wc -l)" -gt 0 ]; then \
echo "๐ Generating TypeScript coverage report..."; \
deno test --allow-all --coverage=coverage_deno $(SCRIPTS_DIR)/**/*.test.ts --quiet; \
echo ""; \
deno coverage coverage_deno; \
echo ""; \
echo "๐ Detailed reports available at:"; \
echo " - LCOV: coverage_deno/lcov.info"; \
echo " - HTML: coverage_deno/html/index.html"; \
else \
echo "โ No TypeScript script tests found"; \
fi
# Clean all coverage artifacts
# Pure alias: delegates to coverage-clean (defined elsewhere in this file).
clean-coverage: coverage-clean
# Validate documentation naming consistency
validate-docs:
@echo "๐ Validating documentation naming consistency..."
@deno run --allow-read --allow-env $(SCRIPTS_DIR)/validate-docs.ts
# Validate documentation links (internal and external)
# Uses the project's own pmat binary (built on demand via cargo run).
validate-doc-links:
@echo "๐ Validating documentation links..."
@cargo run --bin pmat -- validate-docs --root docs --fail-on-error
@echo "โ
All documentation links valid!"
# Test GitHub Actions workflow DAG for version mismatches
test-workflow-dag:
@echo "๐ Testing GitHub Actions workflow DAG for version mismatches..."
@deno run --allow-read --allow-write --allow-run --allow-env $(SCRIPTS_DIR)/test-workflow-dag.ts
@echo ""
# Test workflow DAG with verbose output
test-workflow-dag-verbose:
@echo "๐ Testing GitHub Actions workflow DAG (verbose)..."
@deno run --allow-read --allow-write --allow-run --allow-env $(SCRIPTS_DIR)/test-workflow-dag.ts --verbose --scenarios
@echo ""
# Check GitHub Actions CI status
# Relies on the script's own shebang (executed directly, not via `deno run`).
ci-status:
@echo "๐ Checking GitHub Actions CI status..."
@$(SCRIPTS_DIR)/validate-github-actions-status.ts
# Test GitHub Actions workflows locally
# Locates `act` (PATH, /tmp, or ~/.local/bin) inside ONE shell invocation so
# the ACT_CMD variable survives to the dry-run commands below.
test-actions:
@echo "๐งช Testing GitHub Actions workflows locally..."
@if command -v act >/dev/null 2>&1; then \
ACT_CMD=act; \
elif [ -x "/tmp/act" ]; then \
ACT_CMD=/tmp/act; \
elif [ -x "$$HOME/.local/bin/act" ]; then \
ACT_CMD=$$HOME/.local/bin/act; \
else \
echo "โ act is not installed or not in PATH"; \
echo " Run 'make install-act' to install it"; \
exit 1; \
fi; \
echo "Testing auto-tag-release workflow..."; \
"$$ACT_CMD" -W .github/workflows/auto-tag-release.yml workflow_dispatch -P ubuntu-latest=node:20-bullseye --dryrun; \
echo ""; \
echo "Testing ci workflow..."; \
"$$ACT_CMD" -W .github/workflows/ci.yml push -P ubuntu-latest=node:20-bullseye --dryrun; \
echo ""; \
echo "โ
Workflow syntax validation complete!"
# Install act if not present
# Downloads the latest Linux x86_64 release tarball into ~/.local/bin.
# NOTE(review): Linux/x86_64-only URL — will install the wrong binary on
# macOS or ARM hosts.
install-act:
@if ! command -v act >/dev/null 2>&1; then \
echo "๐ฆ Installing act..."; \
mkdir -p ~/.local/bin || exit 1; \
curl -sL https://github.com/nektos/act/releases/latest/download/act_Linux_x86_64.tar.gz | tar xz -C ~/.local/bin || exit 1; \
echo "โ
act installed successfully to ~/.local/bin!"; \
echo "๐ Make sure ~/.local/bin is in your PATH"; \
echo " You can add it with: export PATH=\$$HOME/.local/bin:\$$PATH"; \
else \
echo "โ act is already installed"; \
fi
# Check if act is installed
# Exits non-zero whenever act is not on PATH, even if found in a known
# fallback location (the messages tell the user how to fix PATH).
check-act:
@if ! command -v act >/dev/null 2>&1; then \
if [ -x "/tmp/act" ]; then \
echo "โน๏ธ Found act in /tmp/act but it's not in PATH"; \
echo " You can use it directly: /tmp/act"; \
echo " Or add to PATH: export PATH=/tmp:\$$PATH"; \
elif [ -x "$$HOME/.local/bin/act" ]; then \
echo "โน๏ธ Found act in ~/.local/bin/act but it's not in PATH"; \
echo " Add to PATH: export PATH=\$$HOME/.local/bin:\$$PATH"; \
else \
echo "โ act is not installed. Run 'make install-act' to install it."; \
echo " Or install manually from: https://github.com/nektos/act"; \
fi; \
exit 1; \
fi
# Validate all naming conventions across the project
validate-naming:
@echo "๐ Validating naming conventions..."
@deno run --allow-read --allow-run $(SCRIPTS_DIR)/validate-naming.ts
# Validate uniform contracts across CLI, MCP, and HTTP interfaces
# Advisory target: the contract-test failure and both grep checks only print
# messages; the recipe itself never fails on a violation.
validate-contracts:
@echo "๐ Validating uniform contracts across all interfaces..."
@echo " Checking parameter consistency..."
@cargo test --package pmat --lib contracts::tests --quiet 2>/dev/null || echo " โ ๏ธ Contract tests need implementation"
@echo " Checking for parameter inconsistencies..."
@if grep -q "project_path:" src/cli/commands.rs 2>/dev/null; then \
echo " โ Found 'project_path' - should be 'path' for uniformity"; \
else \
echo " โ
No 'project_path' found - using uniform 'path'"; \
fi
@if grep -E "file:.*Option<PathBuf>" src/cli/commands.rs 2>/dev/null; then \
echo " โ ๏ธ Found single 'file' parameter - consider 'files: Option<Vec<PathBuf>>'"; \
else \
echo " โ
Using uniform file parameters"; \
fi
@echo " โ
Contract validation complete!"
# Validate pmat-book tests (fast, parallel, fail-fast)
# Only runs critical chapters that validate core functionality
# Optimized for pre-commit hooks - typically completes in <30 seconds
validate-book:
@echo "๐ Validating pmat-book (critical chapters only)..."
@bash $(SCRIPTS_DIR)/validate-pmat-book.sh
# Generate comprehensive context with full AST and metrics analysis
context-root: release
@echo "๐ Generating comprehensive deep context analysis..."
@./target/release/pmat context --output deep_context.md
@echo "โ
Context analysis complete: deep_context.md"
# Simpler alternative using zero-config context command
# NOTE(review): recipe is identical to context-root (same command, same
# output file) — consider consolidating or differentiating the two targets.
context-simple: release
@echo "๐ Generating context with zero-config auto-detection..."
@./target/release/pmat context --output deep_context.md
# Additional targets for different formats (using auto-detection)
context-json-root: release
@./target/release/pmat context \
--format json \
--output deep_context.json
context-sarif: release
@./target/release/pmat context \
--format sarif \
--output deep_context.sarif
context-llm: release
@./target/release/pmat context \
--format llm-optimized \
--output deep_context_llm.md
# Performance comparison with legacy TypeScript implementation
context-legacy:
@echo "๐ฐ๏ธ Running legacy TypeScript implementation for comparison..."
@deno run --allow-all $(SCRIPTS_DIR)/deep-context.ts -o deep_context_legacy.md
# Times both implementations, reports output sizes, then removes all
# comparison artifacts (including context-legacy's output).
context-benchmark-legacy: release context-legacy
@echo "๐ Performance comparison: New auto-detection vs Legacy TypeScript"
@echo "=== New Implementation (Zero-config auto-detection) ==="
@time ./target/release/pmat context --format markdown --output deep_context_new.md
@echo ""
@echo "=== Legacy Implementation (TypeScript) ==="
@time deno run --allow-all $(SCRIPTS_DIR)/deep-context.ts -o deep_context_legacy_timed.md
@echo ""
@echo "๐ Comparing output sizes..."
@echo "New implementation: $$(wc -c < deep_context_new.md) bytes"
@echo "Legacy implementation: $$(wc -c < deep_context_legacy_timed.md) bytes"
@echo "๐งน Cleaning up comparison files..."
@rm -f deep_context_new.md deep_context_legacy_timed.md deep_context_legacy.md
# Validate dependencies before installation
# Reports (but does not fail on) duplicate dependencies, then runs a security
# audit. Both checks are advisory: each falls back to an informational echo.
# FIX: the audit line previously ran `cd $(PWD)/../$(notdir $(PWD)) && cargo
# audit`. That path always resolves back to $(PWD) itself (parent dir plus
# the current dir's own basename), so the cd was a confusing no-op — removed.
deps-validate:
@echo "๐ Validating dependencies..."
@cargo tree --duplicate | grep -v "^$$" || echo "โ
No duplicate dependencies"
@cargo audit || echo "โ ๏ธ Security issues found"
# Install MCP server
# NOTE(review): these targets delegate via `$(MAKE) -C server ...`, which
# contradicts the file header's claim that no individual project Makefiles
# exist — confirm whether server/Makefile still exists or these are stale.
# Local install for development (NO VERSION BUMP) - RECOMMENDED
local-install:
@echo "๐ Installing MCP Agent Toolkit (local development - no version bump)..."
@$(MAKE) -C server local-install
# Install with version bump (FOR RELEASES ONLY)
install:
@echo "๐ Installing MCP Agent Toolkit (WARNING: This bumps version!)..."
@$(MAKE) -C server install || exit 1
# Install latest (check for changes and rebuild if needed)
install-latest:
@echo "๐ Installing latest MCP Agent Toolkit (with auto-rebuild if needed)..."
@$(MAKE) -C server install-latest || exit 1
# Reinstall (force complete reinstall)
reinstall:
@echo "๐ Performing complete reinstall of MCP Agent Toolkit..."
@$(MAKE) -C server reinstall
# Check installation status
status:
@echo "๐ Checking MCP Agent Toolkit status..."
@$(MAKE) -C server status
# Check if rebuild needed
check-rebuild:
@echo "๐ Checking if rebuild is needed..."
@$(MAKE) -C server check-rebuild
# Uninstall MCP server
# Intentionally a pointer-only target: uninstall logic lives in the install
# scripts, not in this Makefile.
uninstall:
@echo "๐๏ธ Uninstalling MCP Agent Toolkit..."
@echo "Note: Uninstall functionality moved to installation scripts"
@echo "Visit: https://github.com/paiml/paiml-mcp-agent-toolkit for uninstall instructions"
# Server-specific commands (direct cargo execution)
# Each `server-*` target runs cargo directly against the root Cargo.toml;
# the `##` suffixes feed the self-documenting help output.
server-build-binary: ## Build server binary
@echo "๐จ Building server binary..."
@cargo build --release --manifest-path Cargo.toml
server-build-docker: ## Build Docker image
@echo "๐ณ Building Docker image..."
@docker build -t paiml-mcp-agent-toolkit .
server-run-mcp: ## Run MCP server in STDIO mode
@echo "๐ Starting MCP server..."
@cargo run --release --manifest-path Cargo.toml
server-run-mcp-test: ## Run MCP server in test mode
@echo "๐งช Starting MCP server in test mode..."
@cargo run --release --manifest-path Cargo.toml -- --test
server-benchmark: ## Run benchmarks
@echo "โก Running benchmarks..."
@cargo bench --manifest-path Cargo.toml
server-test: ## Run server tests
@echo "๐งช Running server tests..."
@cargo test --manifest-path Cargo.toml
server-test-all: ## Run all server tests with all features
@echo "๐งช Running all server tests..."
@cargo test --all-features --manifest-path Cargo.toml
server-outdated: ## Check outdated dependencies
@echo "๐ฆ Checking outdated dependencies..."
@cargo outdated --format json --manifest-path Cargo.toml
server-tokei: ## Count lines of code for server
@echo "๐ Counting lines of code..."
@tokei src --exclude "*.json"
## Fuzzing targets
# All fuzzing is orchestrated by a Deno script; fuzz-% forwards the stem as
# the fuzzer name (prefixed with fuzz_).
.PHONY: fuzz fuzz-all fuzz-coverage fuzz-corpus
fuzz: ## Run fuzzing for Mermaid generation (default 5 minutes)
@deno run --allow-run --allow-read --allow-write --allow-env scripts/run-fuzzing.ts
fuzz-all: ## Run all fuzzers
@deno run --allow-run --allow-read --allow-write --allow-env scripts/run-fuzzing.ts --fuzzer=all
fuzz-coverage: ## Generate fuzzing coverage report
@deno run --allow-run --allow-read --allow-write --allow-env scripts/run-fuzzing.ts --fuzzer=coverage
fuzz-corpus: ## Generate fuzzing corpus
@deno run --allow-write --allow-read scripts/generate-fuzz-corpus.ts
fuzz-%: ## Run specific fuzzer (e.g., make fuzz-mermaid_generation)
@deno run --allow-run --allow-read --allow-write --allow-env scripts/run-fuzzing.ts --fuzzer=fuzz_$*
# Client-specific commands
# Pattern pass-through: `make client-build` runs `make -C client build`.
client-%:
@if [ -f "client/Makefile" ]; then \
$(MAKE) -C client $*; \
else \
echo "Error: client/Makefile not found"; \
exit 1; \
fi
# Build for specific target (for cross-compilation in CI)
# Usage: make build-target TARGET=x86_64-unknown-linux-gnu
# The TARGET variable must be provided by the user
TARGET ?=
build-target:
@if [ -z "$(TARGET)" ]; then \
echo "Error: TARGET not specified"; \
echo "Usage: make build-target TARGET=x86_64-unknown-linux-gnu"; \
exit 1; \
fi
@echo "๐จ Building for target: $(TARGET)"
cargo build --release --target $(TARGET) --manifest-path Cargo.toml
# Run cargo doc
# Builds docs for the full feature matrix used across language analyzers.
cargo-doc:
cargo doc --features "default,rust-ast,typescript-ast,c-ast,cpp-ast,kotlin-ast,demo" --no-deps --manifest-path Cargo.toml
# Run cargo geiger for security audit
# Counts unsafe-code usage across the same feature matrix.
cargo-geiger:
cargo geiger --features "default,rust-ast,typescript-ast,c-ast,cpp-ast,kotlin-ast,demo" --manifest-path Cargo.toml
# Publish crate to crates.io
# Verify build with default features only (simulates `cargo install pmat`)
# Catches missing trait imports that compile locally but fail for users (GH-168)
check-install:
@echo "๐ Verifying build with default features (simulates cargo install pmat)..."
@cargo check --package pmat --no-default-features --features default 2>&1 || \
(echo "โ FAILED: Build with default features failed!"; \
echo " This means 'cargo install pmat' will fail for users."; \
echo " Fix missing imports/features before publishing."; \
exit 1)
@echo "โ
Default-features build OK"
# Publish to crates.io after an interactive confirmation; check-install runs
# first as a prerequisite. Any answer other than y/Y cancels without error.
crate-release: check-install
@echo "๐ฆ Publishing pmat to crates.io..."
@echo "Current version: $$(grep '^version' Cargo.toml | cut -d'"' -f2)"
@echo ""
@echo "Pre-publish checklist:"
@echo " โ Version bumped in Cargo.toml"
@echo " โ CHANGELOG updated"
@echo " โ Tests passing (make test)"
@echo " โ Documentation builds (make crate-docs)"
@echo " โ Default-features build (make check-install)"
@echo ""
@printf "Continue with publish? [y/N] "; \
read REPLY; \
case "$$REPLY" in \
[yY]*) cargo publish --package pmat ;; \
*) echo "โ Publish cancelled" ;; \
esac
# Build and verify crate documentation
# First pass mimics docs.rs (RUSTDOCFLAGS --cfg docsrs); second pass opens
# the result in a browser (interactive — not CI-friendly).
crate-docs:
@echo "๐ Building crate documentation..."
@echo "Testing with docs.rs configuration..."
RUSTDOCFLAGS="--cfg docsrs" cargo doc --package pmat --no-deps
@echo ""
@echo "โ
Documentation builds successfully!"
@echo "Opening documentation in browser..."
@cargo doc --package pmat --no-deps --open
# Update dependencies
# Semver-compatible lockfile update only (no Cargo.toml changes).
update-deps:
cargo update --manifest-path Cargo.toml
# Update dependencies aggressively beyond semver constraints
# Step 1 updates the lockfile within semver; step 2 bumps Cargo.toml to
# latest major versions via cargo-edit's `cargo upgrade` (installed on demand).
# FIX: the tool check used bash-only `&> /dev/null`; make recipes run under
# /bin/sh where `&>` backgrounds the command instead of redirecting. Replaced
# with the POSIX form `>/dev/null 2>&1`, matching the rest of this file.
update-deps-aggressive:
@echo "๐ Updating dependencies aggressively (requires cargo-edit)..."
@if ! command -v cargo-upgrade >/dev/null 2>&1; then \
echo "Installing cargo-edit for dependency upgrade command..."; \
cargo install cargo-edit || exit 1; \
fi
@echo "Step 1: Updating within semver-compatible ranges..."
cargo update --aggressive --manifest-path Cargo.toml
@echo "Step 2: Upgrading to latest incompatible versions (major bumps)..."
cargo upgrade --incompatible --manifest-path Cargo.toml
# Update only security dependencies
# FIX: both audit targets previously ran `cd $(PWD)/../$(notdir $(PWD)) &&`,
# which resolves back to $(PWD) itself (parent dir + current basename) — a
# confusing no-op that could only ever fail; removed.
update-deps-security:
cargo audit fix
# Upgrade dependencies
# Workspace-wide upgrade constrained to versions already in the lockfile.
upgrade-deps:
cargo upgrade --manifest-path Cargo.toml --workspace --to-lockfile
# Fix audit issues
# NOTE(review): identical recipe to update-deps-security — kept as an alias.
audit-fix:
cargo audit fix
# Run benchmarks
# Alias for server-benchmark.
benchmark:
@$(MAKE) server-benchmark
# Check outdated dependencies
outdated:
cargo outdated --format json --manifest-path Cargo.toml
# Server outdated (alias for CI) - removed duplicate, see line 550
# Run cargo test with all features
# PROPTEST_CASES=2 keeps property tests fast for this full-feature pass.
test-all-features:
PROPTEST_CASES=2 cargo test --all-features --manifest-path Cargo.toml
# Server test all (alias for CI) - removed duplicate, see line 546
# Run cargo clippy with warnings as errors
clippy-strict:
cargo clippy --manifest-path Cargo.toml -- -D warnings
# Server build release (for CI)
server-build-release:
cargo build --release --manifest-path Cargo.toml
# Build optimized release binary (workspace-wide)
# Records build-duration metrics under .pmat-metrics/ (start timestamp here,
# completion via scripts/record-metric.sh).
# NOTE(review): `date +%s%3N` is GNU-coreutils-specific — on BSD/macOS date
# it prints a literal "%3N"; confirm Linux-only usage is acceptable.
release:
@mkdir -p .pmat-metrics
@date +%s%3N > .pmat-metrics/build-release.start
@echo "๐ Building optimized release binary for Rust workspace..."
@echo "๐ Workspace structure:"
@echo " - Root workspace: Cargo.toml (workspace configuration)"
@echo " - Server project: Cargo.toml (main binary crate)"
@echo ""
@echo "๐จ Building release binary with workspace optimizations..."
cargo build --release --manifest-path Cargo.toml
@echo ""
@echo "โ
Release binary built successfully!"
@echo "๐ Binary location: ./target/release/pmat"
@echo "๐ Binary size: $$(du -h ./target/release/pmat | cut -f1)"
@./scripts/record-metric.sh build-release
@echo ""
@echo "๐ก Tips for binary size optimization (future improvements):"
@echo " - Strip debug symbols: cargo build --release --config 'profile.release.strip=true'"
@echo " - Enable LTO: cargo build --release --config 'profile.release.lto=true'"
@echo " - Optimize for size: cargo build --release --config 'profile.release.opt-level=\"s\"'"
# Binary size analysis and monitoring
# The `stat -f%z ... || stat -c%s ...` pairs handle macOS (BSD stat) vs Linux
# (GNU stat); the curl fetch of the upstream mermaid size needs network access
# and degrades to a partial message without it.
size-report: release ## Generate comprehensive binary size report
@echo "=== Binary Size Report ==="
@ls -lh target/release/pmat
@echo ""
@echo "=== Asset Optimization Status ==="
@if [ -f "assets/vendor/mermaid.min.js.gz" ]; then \
MERMAID_ORIGINAL=$$(curl -sI "https://unpkg.com/mermaid@latest/dist/mermaid.min.js" | grep -i content-length | cut -d' ' -f2 | tr -d '\r'); \
MERMAID_COMPRESSED=$$(stat -f%z assets/vendor/mermaid.min.js.gz 2>/dev/null || stat -c%s assets/vendor/mermaid.min.js.gz); \
if [ -n "$$MERMAID_ORIGINAL" ] && [ "$$MERMAID_ORIGINAL" -gt 0 ]; then \
REDUCTION=$$(echo "scale=1; ($$MERMAID_ORIGINAL - $$MERMAID_COMPRESSED) * 100 / $$MERMAID_ORIGINAL" | bc -l 2>/dev/null || echo "N/A"); \
echo "Mermaid.js: $$MERMAID_ORIGINAL -> $$MERMAID_COMPRESSED bytes ($$REDUCTION% reduction)"; \
else \
echo "Mermaid.js: Compressed to $$MERMAID_COMPRESSED bytes"; \
fi; \
else \
echo "โ Mermaid.js not compressed (run 'make release' to rebuild)"; \
fi
@if [ -f "assets/demo/app.min.js" ]; then \
if [ -f "../assets/demo/app.js" ]; then \
DEMO_JS_ORIGINAL=$$(stat -f%z ../assets/demo/app.js 2>/dev/null || stat -c%s ../assets/demo/app.js); \
DEMO_JS_MINIFIED=$$(stat -f%z assets/demo/app.min.js 2>/dev/null || stat -c%s assets/demo/app.min.js); \
REDUCTION=$$(echo "scale=1; ($$DEMO_JS_ORIGINAL - $$DEMO_JS_MINIFIED) * 100 / $$DEMO_JS_ORIGINAL" | bc -l 2>/dev/null || echo "N/A"); \
echo "Demo JS: $$DEMO_JS_ORIGINAL -> $$DEMO_JS_MINIFIED bytes ($$REDUCTION% reduction)"; \
else \
echo "Demo JS: Minified"; \
fi; \
else \
echo "โ Demo JS not minified (run 'make release' to rebuild)"; \
fi
@echo ""
@echo "=== Size by Crate ==="
@if command -v cargo-bloat >/dev/null 2>&1; then \
cargo bloat --release --crates -n 10 --manifest-path Cargo.toml; \
else \
echo "Install cargo-bloat for detailed analysis: cargo install cargo-bloat"; \
fi
@echo ""
@echo "=== Largest Functions ==="
@if command -v cargo-bloat >/dev/null 2>&1; then \
cargo bloat --release -n 10 --manifest-path Cargo.toml; \
else \
echo "Install cargo-bloat for detailed analysis: cargo install cargo-bloat"; \
fi
# Appends a dated size sample to size-history.csv for trend tracking.
size-track: release ## Track binary size over time
@SIZE=$$(stat -f%z target/release/pmat 2>/dev/null || stat -c%s target/release/pmat); \
echo "$$(date +%Y-%m-%d),$${SIZE}" >> size-history.csv; \
echo "Binary size: $${SIZE} bytes"; \
echo "History logged to size-history.csv"
# Fails the build (exit 1) when the release binary exceeds the hard 20MB cap.
size-check: release ## Check if binary size exceeds threshold
@SIZE=$$(stat -f%z target/release/pmat 2>/dev/null || stat -c%s target/release/pmat); \
THRESHOLD=20971520; \
echo "Binary size: $${SIZE} bytes"; \
echo "Size limit: $${THRESHOLD} bytes (20MB)"; \
if [ $${SIZE} -gt $${THRESHOLD} ]; then \
echo "โ Binary size exceeds 20MB threshold"; \
exit 1; \
else \
echo "โ
Binary size within acceptable limits"; \
fi
# Compare binary size between a minimal-feature build and the full build.
# FIX: each recipe line runs in its own shell, so SIZE_MINIMAL (set after the
# minimal build) was always empty by the time the overhead percentage was
# computed in the final recipe line — bc errored and the result was always
# "N/A". The minimal size is now persisted to a temp file under target/ and
# read back (then removed) in the final shell.
size-compare: ## Compare binary size with minimal build
@echo "=== Building with minimal features ==="
@cargo build --release --no-default-features --features rust-only --manifest-path Cargo.toml
@SIZE_MINIMAL=$$(stat -f%z target/release/pmat 2>/dev/null || stat -c%s target/release/pmat); \
echo "$${SIZE_MINIMAL}" > target/.size-minimal; \
echo "Minimal build size: $${SIZE_MINIMAL} bytes"
@echo ""
@echo "=== Building with all features ==="
@$(MAKE) release
@SIZE_FULL=$$(stat -f%z target/release/pmat 2>/dev/null || stat -c%s target/release/pmat); \
SIZE_MINIMAL=$$(cat target/.size-minimal 2>/dev/null || echo 0); \
rm -f target/.size-minimal; \
echo "Full build size: $${SIZE_FULL} bytes"; \
REDUCTION=$$(echo "scale=1; ($${SIZE_FULL} - $${SIZE_MINIMAL}) * 100 / $${SIZE_FULL}" | bc -l 2>/dev/null || echo "N/A"); \
echo "Feature overhead: $${REDUCTION}%"
# ============================================================================
# CANONICAL VERSION MANAGEMENT
# Following the specification in docs/todo/canonical-version-updates-spec.md
# ============================================================================
# FIX: check-install and release-verify were not declared .PHONY anywhere
# (neither here nor in the top-of-file list); added so a stray file with
# either name cannot silently disable the target.
.PHONY: pre-release-checks release-patch release-minor release-major release-auto install-release-tools check-install release-verify
# Install required release tools
# Each tool is installed only if not already on PATH; --locked pins deps.
install-release-tools:
@echo "๐ฆ Installing release tools..."
@which cargo-release > /dev/null 2>&1 || { cargo install cargo-release --locked || exit 1; }
@which cargo-semver-checks > /dev/null 2>&1 || { cargo install cargo-semver-checks --locked || exit 1; }
@which cargo-audit > /dev/null 2>&1 || { cargo install cargo-audit --locked || exit 1; }
@which cargo-outdated > /dev/null 2>&1 || { cargo install cargo-outdated --locked || exit 1; }
@echo "โ
Release tools installed"
# Pre-release quality gates
# Hard gates: clean build, version consistency, lint, tests. Advisory steps
# (SATD, audit, outdated, semver) report but never fail the recipe.
# FIX: step numbering was duplicated — "3" was used for both the quality
# gates and the SATD check; steps renumbered 1-7.
pre-release-checks:
@echo "๐ Running pre-release checks..."
@echo ""
@echo "1๏ธโฃ Cleaning build artifacts for fresh release build..."
@$(MAKE) clean-quick
@echo "โ
Build artifacts cleaned"
@echo ""
@echo "2๏ธโฃ Version consistency check..."
@workspace_version=$$(grep '^version = ' Cargo.toml | cut -d'"' -f2); \
server_uses_workspace=$$(grep '^version.workspace = true' Cargo.toml); \
if [ -n "$$workspace_version" ] && [ -n "$$server_uses_workspace" ]; then \
echo "โ
Versions are consistent (workspace: $$workspace_version, server: uses workspace)"; \
else \
echo "โ Version mismatch detected!"; exit 1; \
fi
@echo ""
@echo "3๏ธโฃ Running quality gates..."
@$(MAKE) lint || (echo "โ Linting failed!" && exit 1)
@$(MAKE) test-fast || (echo "โ Tests failed!" && exit 1)
@echo "โ
Quality gates passed"
@echo ""
@echo "4๏ธโฃ Checking for SATD..."
@./target/debug/pmat analyze satd --strict 2>/dev/null || cargo run --bin pmat -- analyze satd --strict || echo "โ ๏ธ SATD check skipped (pmat not built)"
@echo ""
@echo "5๏ธโฃ Security audit..."
@if [ -d "server" ]; then cargo audit || echo "โ ๏ธ Some vulnerabilities found (review before release)"; else cd .. && cargo audit || echo "โ ๏ธ Some vulnerabilities found (review before release)"; fi
@echo ""
@echo "6๏ธโฃ Checking outdated dependencies..."
@if [ -d "server" ]; then cargo outdated --root-deps-only || true; else cd .. && cargo outdated --root-deps-only || true; fi
@echo ""
@echo "7๏ธโฃ SemVer compatibility check..."
@if [ -d "server" ]; then cargo semver-checks check-release || echo "โ ๏ธ SemVer check completed (review any warnings)"; else cd .. && cargo semver-checks check-release || echo "โ ๏ธ SemVer check completed (review any warnings)"; fi
@echo ""
@echo "โ
All pre-release checks completed!"
# Patch release (x.y.Z) - bug fixes only
# All three release targets gate on install-release-tools + pre-release-checks
# and then hand off to cargo-release with --execute (real changes, not a dry run).
release-patch: install-release-tools pre-release-checks
@echo "๐ Creating PATCH release (bug fixes only)..."
@cargo release patch --execute
# Minor release (x.Y.z) - new features, backward compatible
release-minor: install-release-tools pre-release-checks
@echo "๐ Creating MINOR release (new features, backward compatible)..."
@cargo release minor --execute
# Major release (X.y.z) - breaking changes
release-major: install-release-tools pre-release-checks
@echo "๐ Creating MAJOR release (breaking changes)..."
@cargo release major --execute
# Auto-determine version bump based on changes
# MAJOR if semver-checks reports breaking changes; MINOR if any `feat:`
# commit exists since the last tag (falling back to HEAD~10 when no tag);
# otherwise PATCH.
# FIX: `if $$SEMVER_CMD ...` word-split the command string, so the variant
# "cd .. && cargo semver-checks check-release" passed `&&` as a literal
# argument to `cd` and never ran semver-checks. `eval` re-parses the string
# so the `&&` operator works (the `cd` stays confined to the pipeline subshell).
release-auto: install-release-tools pre-release-checks
@echo "๐ค Auto-determining version bump type..."
@if [ -d "server" ]; then SEMVER_CMD="cargo semver-checks check-release"; else SEMVER_CMD="cd .. && cargo semver-checks check-release"; fi; \
if eval "$$SEMVER_CMD" 2>&1 | grep -q "MAJOR"; then \
echo "๐ฅ Breaking changes detected - MAJOR release required"; \
$(MAKE) release-major; \
elif git log --oneline $(shell git describe --tags --abbrev=0 2>/dev/null || echo HEAD~10)..HEAD | grep -qE '^[a-f0-9]+ feat:'; then \
echo "โจ New features detected - MINOR release"; \
$(MAKE) release-minor; \
else \
echo "๐ Bug fixes/patches only - PATCH release"; \
$(MAKE) release-patch; \
fi
# Dry run for release (no actual changes)
# Uses `patch` only as a representative level; nothing is written or pushed.
release-dry:
@echo "๐งช Dry run for release..."
@cargo release patch --dry-run
# Verify release was successful
# Prints the latest git tag, shows the crates.io listing, then force-installs
# from crates.io and checks the installed binary reports a version.
# FIX: the `echo "Latest tag..."` line previously ended with a trailing `\`,
# which spliced the next line (`@cargo search pmat | head -1`) into the same
# shell command — the `@` was then passed to the shell as a literal command
# name ("@cargo: not found") instead of acting as make's echo-suppression
# prefix. The continuation is removed so each command is its own recipe line.
release-verify:
@echo "๐ Verifying release..."
@LATEST_TAG=$$(git describe --tags --abbrev=0); \
echo "Latest tag: $$LATEST_TAG"
@cargo search pmat | head -1
@echo ""
@echo "๐ฆ Testing installation from crates.io..."
@cargo install pmat --force && pmat --version
@echo "โ
Release verification complete!"
# Create GitHub release with binary artifacts
# Delegates to a Deno script (run via its shebang); fails fast if Deno is absent.
create-release:
@echo "๐ฆ Creating GitHub release..."
@if command -v deno >/dev/null 2>&1; then \
./scripts/create-release.ts; \
else \
echo "โ Error: Deno is required to create releases"; \
echo "Install Deno from: https://deno.land/"; \
exit 1; \
fi
# Test curl installation
test-curl-install:
@echo "๐งช Testing curl installation..."
@if command -v deno >/dev/null 2>&1; then \
./scripts/test-curl-install.ts; \
else \
echo "โ Error: Deno is required to run tests"; \
echo "Install Deno from: https://deno.land/"; \
exit 1; \
fi
# Check documentation with rustdoc
# NOTE(review): the trailing `|| true` makes -D missing_docs advisory only —
# the target never fails, even when docs are missing. Confirm intentional.
cargo-rustdoc:
cargo rustdoc --features "default,rust-ast,typescript-ast,c-ast,cpp-ast,kotlin-ast,demo" --manifest-path Cargo.toml -- -D missing_docs || true
# Install development tools
# Installs each cargo helper tool only if not already on PATH; any failed
# install aborts the target.
# FIX: the presence checks used bash-only `&> /dev/null`; make recipes run
# under /bin/sh where `&>` backgrounds the command instead of redirecting,
# breaking the `if !` test. Replaced with POSIX `>/dev/null 2>&1`, matching
# the redirection style used throughout the rest of this file.
install-dev-tools:
@if ! command -v tokei >/dev/null 2>&1; then \
echo "Installing tokei..."; \
cargo install tokei || exit 1; \
fi
@if ! command -v cargo-geiger >/dev/null 2>&1; then \
echo "Installing cargo-geiger..."; \
cargo install cargo-geiger || exit 1; \
fi
@if ! command -v cargo-outdated >/dev/null 2>&1; then \
echo "Installing cargo-outdated..."; \
cargo install cargo-outdated || exit 1; \
fi
@if ! command -v cargo-edit >/dev/null 2>&1; then \
echo "Installing cargo-edit..."; \
cargo install cargo-edit || exit 1; \
fi
@if ! command -v cargo-audit >/dev/null 2>&1; then \
echo "Installing cargo-audit..."; \
cargo install cargo-audit || exit 1; \
fi
@if ! command -v cargo-llvm-cov >/dev/null 2>&1; then \
echo "Installing cargo-llvm-cov..."; \
cargo install cargo-llvm-cov || exit 1; \
fi
# Count lines of code with tokei
# Same invocation as server-tokei, without the banner echo.
tokei:
tokei src --exclude "*.json"
# Count lines of code for server - removed duplicate, see line 554
# Setup development environment
# NOTE: This does NOT install Docker - Docker is optional for this project
setup:
@echo "๐ง Setting up development environment..."
@echo "Installing Rust toolchain components..."
rustup component add rustfmt clippy
@echo "Installing development tools..."
@which cargo-lambda > /dev/null 2>&1 || { cargo install cargo-lambda || exit 1; }
@which cargo-watch > /dev/null 2>&1 || { cargo install cargo-watch || exit 1; }
@which cargo-audit > /dev/null 2>&1 || { cargo install cargo-audit || exit 1; }
@which cargo-llvm-cov > /dev/null 2>&1 || { cargo install cargo-llvm-cov || exit 1; }
@if command -v deno >/dev/null 2>&1; then \
echo "โ
Deno is already installed"; \
else \
echo "๐ฆ Installing Deno..."; \
curl -fsSL https://deno.land/install.sh -o /tmp/deno-install.sh && sh /tmp/deno-install.sh && rm /tmp/deno-install.sh; \
echo "Please add Deno to your PATH as instructed above"; \
fi
@if command -v shellcheck >/dev/null 2>&1; then \
echo "โ
Shellcheck is already installed"; \
else \
echo "โ ๏ธ Shellcheck is not installed. Install it with:"; \
echo " Ubuntu/Debian: sudo apt-get install shellcheck"; \
echo " macOS: brew install shellcheck"; \
echo " Or visit: https://github.com/koalaman/shellcheck#installing"; \
fi
@echo ""
@echo "โ
Development environment setup complete!"
@echo ""
@echo "๐ Note: Docker is OPTIONAL for this project."
@echo " - The server runs as a standalone binary by default"
@echo " - Docker is only needed if you want containerized deployment"
@echo " - To install Docker separately, visit: https://docs.docker.com/get-docker/"
# Quick start guide
# Runs `setup` first (prerequisite), then prints a short getting-started walkthrough.
quickstart: setup
	@echo ""
	@echo "๐ Quick Start Guide"
	@echo "==================="
	@echo ""
	@echo "1. Test the MCP server:"
	@echo " make server-run-mcp-test"
	@echo ""
	@echo "2. In another terminal, test with Claude Code:"
	@echo " claude mcp add /path/to/paiml-mcp-agent-toolkit/server"
	@echo ""
	@echo "3. Generate templates:"
	@echo " Use /mcp in Claude Code to see available tools"
	@echo ""
# Help command
# Prints a categorized summary of the most-used targets (not exhaustive — see
# the .PHONY list at the top of the file for everything), then lists the
# projects in $(PROJECTS), which is defined earlier in this Makefile.
help:
	@echo "MCP Agent Toolkit - Root Project"
	@echo "================================"
	@echo ""
	@echo "Primary targets:"
	@echo " all - Format and build all projects (default)"
	@echo " validate - Run all checks across projects (check, lint, test)"
	@echo " kaizen - Toyota Way continuous improvement (comprehensive quality gates)"
	@echo " quickstart - Setup and show quick start guide"
	@echo ""
	@echo "Development (all projects):"
	@echo " format - Format code in all projects"
	@echo " fix - Auto-fix all formatting issues (alias for format)"
	@echo " lint - Run linters in all projects (checks only)"
	@echo " check - Type check all projects"
	@echo " test - Run fast tests (ONLY fast tests allowed)"
	@echo " test-doc - Run doctests only"
	@echo " test-fast - Run fast tests with nextest (<5 min)"
	@echo " test-safe - Run tests with manual thread control (THREADS=n)"
	@echo " coverage - Generate HTML coverage report (<10 min)"
	@echo " coverage-ci - Generate LCOV for CI"
	@echo " coverage-open - Open HTML coverage in browser"
	@echo " coverage-clean - Clean coverage artifacts"
	@echo " audit - Run security audit on all projects"
	@echo " docs - Generate and open documentation"
	@echo " validate-docs - Check documentation naming consistency"
	@echo " validate-naming - Validate naming conventions across the project"
	@echo " ci-status - Check GitHub Actions workflow status"
	@echo " test-actions - Test GitHub Actions workflows locally with act"
	@echo " context - Generate deep context analysis with auto-detection"
	@echo " context-json - Generate deep context analysis in JSON format"
	@echo " context-sarif - Generate deep context analysis in SARIF format"
	@echo " context-llm - Generate LLM-optimized deep context analysis"
	@echo " context-benchmark - Compare new vs legacy implementation performance"
	@echo " build - Build all projects (binaries only)"
	@echo " release - Build optimized release binary (workspace-wide)"
	@echo " clean - Clean all build artifacts"
	@echo " clean-tmp - Aggressively clean /tmp (removes most temporary files)"
	@echo " clear-swap - Clear swap memory (useful between test runs)"
	@echo ""
	@echo "Distributed Testing (stratified architecture):"
	@echo " test-unit - Run unit tests (<10s feedback)"
	@echo " test-services - Run service integration tests (<30s)"
	@echo " test-protocols - Run protocol adapter tests (<45s)"
	@echo " test-e2e - Run end-to-end system tests (<120s)"
	@echo " test-performance - Run performance regression tests"
	@echo " test-stratified - Run all stratified tests in parallel"
	@echo " test-all - Run ALL tests (comprehensive)"
	@echo ""
	@echo "Documentation:"
	@echo " dogfood - Update README.md with current project metrics"
	@echo " dogfood-ci - Gather metrics without updating files (for CI)"
	@echo " update-rust-docs - Update rust-docs with current performance metrics"
	@echo ""
	@echo "Running:"
	@echo " run-mcp - Run MCP server in STDIO mode"
	@echo " run-mcp-test - Run MCP server in test mode"
	@echo ""
	@echo "Interface Testing (CLI, MCP, HTTP):"
	@echo " test-all-interfaces - MANDATORY triple-interface testing"
	@echo " test-feature-all-interfaces FEATURE=<name> - Test specific feature across interfaces"
	@echo " test-interface-consistency - Validate consistent results across interfaces"
	@echo " benchmark-all-interfaces - Performance benchmark across interfaces"
	@echo " load-test-interfaces - Load test all interfaces"
	@echo ""
	@echo "Installation:"
	@echo " local-install - Install for development (NO VERSION BUMP) - RECOMMENDED"
	@echo " install - Install with version bump (FOR RELEASES ONLY)"
	@echo " install-latest - Smart install (rebuild only if source changed)"
	@echo " reinstall - Force complete uninstall and reinstall"
	@echo " status - Check installation and build status"
	@echo " check-rebuild - Check if source files changed (needs rebuild)"
	@echo " uninstall - Remove MCP server from system"
	@echo ""
	@echo "Project-specific commands:"
	@echo " server-* - Run any server Makefile target"
	@echo " client-* - Run any client Makefile target"
	@echo ""
	@echo "Examples:"
	@echo " make server-run-mcp-test - Run MCP server in test mode"
	@echo " make server-build-binary - Build server binary only (no Docker)"
	@echo " make server-build-docker - Build Docker image only"
	@echo " make client-build - Build client only"
	@echo ""
	@echo "Enhanced Analysis (using built binary):"
	@echo " analyze-top-files - Top files analysis across complexity and churn metrics"
	@echo " analyze-composite - Composite analysis combining multiple ranking factors"
	@echo " analyze-health-dashboard - Comprehensive project health dashboard"
	@echo " profile-binary-performance - Profile binary performance across operations"
	@echo " profile-deep-context - Profile deep context creation with detailed timing and annotation analysis"
	@echo " analyze-memory-usage - Analyze binary memory usage patterns"
	@echo " analyze-scaling - Test binary scaling with different project sizes"
	@echo " analyze-satd - Self-admitted technical debt analysis"
	@echo " analyze-satd-evolution - SATD evolution tracking over time"
	@echo " export-critical-satd - Export critical technical debt in SARIF format"
	@echo " satd-metrics - Generate comprehensive SATD metrics"
	@echo ""
	@echo "Overnight Autonomous Improvement:"
	@echo " overnight-improve - Start 8-12 hour autonomous improvement system"
	@echo " overnight-monitor - Monitor progress of overnight improvement"
	@echo " overnight-swap-cron - Set up cron job for periodic swap clearing"
	@echo ""
	@echo "Setup:"
	@echo " setup - Install all development dependencies"
	@echo " install-act - Install act for local GitHub Actions testing"
	@echo " help - Show this help message"
	@echo ""
	@echo "Projects included:"
	@for project in $(PROJECTS); do \
	echo " - $$project"; \
	done
# =============================================================================
# Triple Interface Testing (CLI, MCP, HTTP) - MANDATORY for all development
# =============================================================================
# Session Start Ritual - Test all interfaces with core functionality
# BUG FIX: the background server start and `HTTP_PID=$$!` must be in the SAME
# recipe line — each recipe line runs in its own shell, so capturing $$! on a
# separate line captured nothing and the server was never killed (leaked).
test-all-interfaces: release
	@echo "๐ MANDATORY TRIPLE-INTERFACE TESTING: CLI, MCP, HTTP"
	@echo "๐ As per CLAUDE.md: This project MUST test ALL THREE interfaces continuously"
	@echo ""
	@echo "๐ Starting HTTP server in background..."
	@./target/release/pmat serve --port 8080 & \
	HTTP_PID=$$!; \
	sleep 3; \
	echo ""; \
	echo "=== Testing Complexity Analysis Across All Interfaces ==="; \
	echo ""; \
	echo "๐ฅ๏ธ CLI Interface:"; \
	time ./target/release/pmat analyze complexity --top-files 5 --format json > cli-complexity.json; \
	echo "CLI Response size: $$(wc -c < cli-complexity.json) bytes"; \
	echo ""; \
	echo "๐ MCP Interface:"; \
	echo '{"jsonrpc":"2.0","method":"analyze_complexity","params":{"project_path":"./","top_files":5,"format":"json"},"id":1}' | \
	./target/release/pmat --mode mcp > mcp-complexity.json; \
	echo "MCP Response size: $$(wc -c < mcp-complexity.json) bytes"; \
	echo ""; \
	echo "๐ HTTP Interface:"; \
	time curl -sf -X GET "http://localhost:8080/api/v1/analyze/complexity?top_files=5&format=json" > http-complexity.json || exit 1; \
	echo "HTTP Response size: $$(wc -c < http-complexity.json) bytes"; \
	echo ""; \
	echo "โ
All interfaces tested successfully!"; \
	echo "๐งน Cleaning up..."; \
	kill $$HTTP_PID 2>/dev/null || true; \
	rm -f cli-complexity.json mcp-complexity.json http-complexity.json || true
# Test specific feature across all interfaces
# Usage: make test-feature-all-interfaces FEATURE=complexity
# The FEATURE variable must be provided by the user
FEATURE ?=
# BUG FIX: server start and PID capture now share one recipe line — separate
# recipe lines run in separate shells, so $$! previously captured nothing and
# the background server leaked.
test-feature-all-interfaces: release
	@if [ -z "$(FEATURE)" ]; then \
	echo "Error: FEATURE not specified"; \
	echo "Usage: make test-feature-all-interfaces FEATURE=complexity"; \
	echo "Available features: complexity, churn, dag, context"; \
	exit 1; \
	fi
	@echo "๐งช Testing $(FEATURE) feature across all interfaces..."
	@./target/release/pmat serve --port 8080 & \
	HTTP_PID=$$!; \
	sleep 2; \
	case "$(FEATURE)" in \
	complexity) \
	echo "CLI: ./target/release/pmat analyze complexity --top-files 5"; \
	./target/release/pmat analyze complexity --top-files 5 --format table; \
	echo "MCP: analyze_complexity method"; \
	echo '{"jsonrpc":"2.0","method":"analyze_complexity","params":{"top_files":5},"id":1}' | ./target/release/pmat --mode mcp; \
	echo "HTTP: GET /api/v1/analyze/complexity"; \
	curl -sf "http://localhost:8080/api/v1/analyze/complexity?top_files=5" || exit 1; \
	;; \
	churn) \
	echo "CLI: ./target/release/pmat analyze churn --days 7"; \
	./target/release/pmat analyze churn --days 7 --top-files 5 --format table; \
	echo "MCP: analyze_churn method"; \
	echo '{"jsonrpc":"2.0","method":"analyze_churn","params":{"days":7,"top_files":5},"id":1}' | ./target/release/pmat --mode mcp; \
	echo "HTTP: GET /api/v1/analyze/churn"; \
	curl -sf "http://localhost:8080/api/v1/analyze/churn?days=7&top_files=5" || exit 1; \
	;; \
	context) \
	echo "CLI: ./target/release/pmat context"; \
	./target/release/pmat context --format json > /tmp/cli_context.json; \
	echo "MCP: analyze_context method"; \
	echo '{"jsonrpc":"2.0","method":"analyze_context","params":{},"id":1}' | ./target/release/pmat --mode mcp > /tmp/mcp_context.json; \
	echo "HTTP: GET /api/v1/context"; \
	curl -sf "http://localhost:8080/api/v1/context" > /tmp/http_context.json || exit 1; \
	;; \
	*) \
	echo "Unknown feature: $(FEATURE)"; \
	;; \
	esac; \
	kill $$HTTP_PID 2>/dev/null || true
# Interface consistency validation
# BUG FIX: start the server and capture $$! in the same shell — separate
# recipe lines run in separate shells, so the PID was never captured and the
# server process leaked after every run.
test-interface-consistency: release
	@echo "๐ Testing interface consistency (same results across CLI/MCP/HTTP)..."
	@./target/release/pmat serve --port 8080 & \
	HTTP_PID=$$!; \
	sleep 3; \
	echo "Generating complexity analysis via all interfaces..."; \
	./target/release/pmat analyze complexity --top-files 3 --format json > consistency-cli.json; \
	echo '{"jsonrpc":"2.0","method":"analyze_complexity","params":{"top_files":3,"format":"json"},"id":1}' | \
	./target/release/pmat --mode mcp | jq '.result' > consistency-mcp.json; \
	curl -sf "http://localhost:8080/api/v1/analyze/complexity?top_files=3&format=json" > consistency-http.json || exit 1; \
	echo "Comparing outputs..."; \
	if diff -q consistency-cli.json consistency-mcp.json >/dev/null && \
	diff -q consistency-cli.json consistency-http.json >/dev/null; then \
	echo "โ
All interfaces return consistent results!"; \
	else \
	echo "โ ๏ธ Interfaces return different results:"; \
	echo "CLI vs MCP:"; \
	diff consistency-cli.json consistency-mcp.json || true; \
	echo "CLI vs HTTP:"; \
	diff consistency-cli.json consistency-http.json || true; \
	fi; \
	kill $$HTTP_PID 2>/dev/null || true; \
	rm -f consistency-cli.json consistency-mcp.json consistency-http.json || true
# Performance benchmark across interfaces
# Requires `hyperfine` on PATH.
# BUG FIX: the server start and `HTTP_PID=$$!` now share one recipe line;
# previously they ran in separate shells, so the PID was never captured and
# the background server leaked.
benchmark-all-interfaces: release
	@echo "โก Performance benchmarking across all interfaces..."
	@./target/release/pmat serve --port 8080 & \
	HTTP_PID=$$!; \
	sleep 3; \
	echo "Benchmarking complexity analysis (5 iterations each):"; \
	echo ""; \
	echo "CLI Interface:"; \
	hyperfine --warmup 2 --min-runs 5 \
	"./target/release/pmat analyze complexity --top-files 5 --format json"; \
	echo ""; \
	echo "MCP Interface:"; \
	hyperfine --warmup 2 --min-runs 5 \
	"echo '{\"jsonrpc\":\"2.0\",\"method\":\"analyze_complexity\",\"params\":{\"top_files\":5},\"id\":1}' | ./target/release/pmat --mode mcp"; \
	echo ""; \
	echo "HTTP Interface:"; \
	hyperfine --warmup 2 --min-runs 5 \
	"curl -s http://localhost:8080/api/v1/analyze/complexity?top_files=5"; \
	kill $$HTTP_PID 2>/dev/null || true
# Interface load testing
# HTTP load via Apache Bench when available; CLI load via 10 parallel runs.
# BUG FIX: server start and `HTTP_PID=$$!` merged into one recipe line —
# separate recipe lines run in separate shells, so the PID was never captured
# and the server leaked.
load-test-interfaces: release
	@echo "๐๏ธ Load testing all interfaces..."
	@./target/release/pmat serve --port 8080 & \
	HTTP_PID=$$!; \
	sleep 3; \
	echo "HTTP Load Test (100 requests, 10 concurrent):"; \
	if command -v ab >/dev/null 2>&1; then \
	ab -n 100 -c 10 -k "http://localhost:8080/api/v1/analyze/complexity?top_files=5"; \
	else \
	echo "โ ๏ธ Apache Bench (ab) not installed. Install with: sudo apt-get install apache2-utils"; \
	fi; \
	echo ""; \
	echo "CLI Parallel Test (10 concurrent processes):"; \
	for i in $$(seq 1 10); do \
	./target/release/pmat analyze complexity --top-files 5 --format json > /tmp/cli_test_$$i.json & \
	done; \
	wait; \
	echo "โ
CLI parallel test completed"; \
	rm -f /tmp/cli_test_*.json || true; \
	kill $$HTTP_PID 2>/dev/null || true
# =============================================================================
# Enhanced Analysis Targets Using Built Binary
# =============================================================================
# Top-files ranking analysis across different metrics
# Prints top-10 complexity/churn tables to the console, then saves top-15
# JSON reports under artifacts/analysis/.
analyze-top-files: release
	@echo "๐ Top Files Analysis across multiple metrics using built binary..."
	@mkdir -p artifacts/analysis
	@echo "๐งฎ Complexity Top Files (Top 10):"
	@./target/release/pmat analyze complexity --top-files 10 --format table
	@echo ""
	@echo "๐ฅ Churn Top Files (Top 10, last 30 days):"
	@./target/release/pmat analyze churn --days 30 --top-files 10 --format table
	@echo ""
	@echo "๐พ Saving detailed JSON reports..."
	@./target/release/pmat analyze complexity --top-files 15 --format json > artifacts/analysis/top-complexity.json
	@./target/release/pmat analyze churn --days 30 --top-files 15 --format json > artifacts/analysis/top-churn.json
	@echo "โ
Top files analysis complete! Reports saved to artifacts/analysis/"
# Composite analysis combining multiple ranking factors
# Intersects the top-complexity and top-churn file lists (comm -12 on sorted
# paths), then concatenates both JSON reports into a single composite document.
# NOTE(review): fixed /tmp file names make this target unsafe under `make -j`
# if run concurrently with itself — deriving names from $$$$ (PID) would avoid that.
analyze-composite: release
	@echo "๐ฏ Composite Analysis: Combining complexity, churn, and risk factors..."
	@mkdir -p artifacts/analysis
	@echo "๐ Generating comprehensive ranking using built binary..."
	@./target/release/pmat analyze complexity --top-files 15 --format json > /tmp/complexity_composite.json
	@./target/release/pmat analyze churn --days 30 --top-files 15 --format json > /tmp/churn_composite.json
	@echo "๐ Cross-referencing high-complexity and high-churn files:"
	@echo "Files appearing in both top complexity and top churn:"
	@jq -r '.files[] | .file_path' /tmp/complexity_composite.json | sort > /tmp/complexity_files.txt
	@jq -r '.hotspots[] | .file_path' /tmp/churn_composite.json | sort > /tmp/churn_files.txt
	@comm -12 /tmp/complexity_files.txt /tmp/churn_files.txt | head -10
	@echo ""
	@echo "๐พ Saving composite analysis to artifacts/analysis/composite-ranking.json"
# $(shell date -Iseconds) is expanded by make when the recipe runs (timestamp of the build).
	@echo '{"analysis_type":"composite","generated_at":"'$(shell date -Iseconds)'","components":{"complexity":' > artifacts/analysis/composite-ranking.json
	@cat /tmp/complexity_composite.json >> artifacts/analysis/composite-ranking.json
	@echo ',"churn":' >> artifacts/analysis/composite-ranking.json
	@cat /tmp/churn_composite.json >> artifacts/analysis/composite-ranking.json
	@echo '}}' >> artifacts/analysis/composite-ranking.json
	@rm -f /tmp/complexity_composite.json /tmp/churn_composite.json /tmp/complexity_files.txt /tmp/churn_files.txt
	@echo "โ
Composite analysis complete!"
# Comprehensive project health dashboard
# Produces a JSON context report plus complexity/churn tables and a Mermaid
# dependency graph; artifacts land in artifacts/dashboard/.
analyze-health-dashboard: release
	@echo "๐ฅ Project Health Dashboard using built binary comprehensive analysis..."
	@mkdir -p artifacts/dashboard
	@echo "๐ Generating comprehensive project health metrics..."
	@echo ""
	@echo "=== Project Overview ==="
	@./target/release/pmat context --format json > artifacts/dashboard/health-context.json
	@echo "Context analysis complete โ"
	@echo ""
	@echo "=== Risk Assessment ==="
	@./target/release/pmat analyze complexity --top-files 5 --format table
	@echo ""
	@echo "=== Recent Activity ==="
	@./target/release/pmat analyze churn --days 7 --top-files 5 --format table
	@echo ""
	@echo "=== Dependency Graph ==="
	@./target/release/pmat analyze dag --enhanced --top-files 10 -o artifacts/dashboard/dependency-graph.mmd
	@echo "Dependency graph saved to artifacts/dashboard/dependency-graph.mmd โ"
	@echo ""
	@echo "๐พ Health dashboard artifacts saved to artifacts/dashboard/"
	@echo "๐ Key files:"
	@echo " - health-context.json (comprehensive context analysis)"
	@echo " - dependency-graph.mmd (visual dependency analysis)"
# Binary performance profiling
# Uses hyperfine to benchmark startup (`--version`), complexity analysis, and
# context generation; JSON results go to artifacts/profiling/.
profile-binary-performance: release
	@echo "โก Profiling binary performance across different operations..."
	@mkdir -p artifacts/profiling
	@echo "๐ Testing startup and analysis performance..."
	@echo ""
	@echo "=== Binary Startup Performance ==="
	@hyperfine --warmup 3 --min-runs 10 \
	"./target/release/pmat --version" \
	--export-json artifacts/profiling/startup-performance.json
	@echo ""
	@echo "=== Analysis Performance by Operation ==="
	@echo "Complexity Analysis:"
	@hyperfine --warmup 2 --min-runs 5 \
	"./target/release/pmat analyze complexity --top-files 5 --format json" \
	--export-json artifacts/profiling/complexity-performance.json
	@echo ""
	@echo "Context Generation:"
	@hyperfine --warmup 1 --min-runs 3 \
	"./target/release/pmat context --format json --output /tmp/context_perf.json" \
	--export-json artifacts/profiling/context-performance.json
	@rm -f /tmp/context_perf.json
	@echo ""
	@echo "โ
Performance profiling complete! Reports in artifacts/profiling/"
# Profile deep context creation with detailed timing and memory analysis
# Prefers GNU time (`/usr/bin/time -v`) for RSS/CPU stats, falling back to the
# shell builtin `time`; then counts annotation markers in the generated report.
# NOTE(review): `rm -f /tmp/*_count` globs wider than the five files created
# here — an explicit list would be safer on a shared /tmp.
profile-deep-context: release
	@echo "๐ Profiling deep context creation with detailed analysis..."
	@mkdir -p artifacts/profiling
	@echo ""
	@echo "๐ Deep Context Performance Profile:"
	@echo "====================================="
	@if command -v /usr/bin/time >/dev/null 2>&1; then \
	echo "โฑ๏ธ Timing with memory analysis:"; \
	/usr/bin/time -v ./target/release/pmat context --output artifacts/profiling/deep_context_profile.md 2> artifacts/profiling/deep-context-timing.txt; \
	echo ""; \
	echo "๐ Memory Usage Summary:"; \
	grep -E "(Maximum resident|User time|System time|Percent of CPU|Page faults)" artifacts/profiling/deep-context-timing.txt || echo "Time command not available"; \
	else \
	echo "โฑ๏ธ Basic timing:"; \
	time ./target/release/pmat context --output artifacts/profiling/deep_context_profile.md; \
	fi
	@echo ""
	@echo "๐ Output Analysis:"
	@if [ -f artifacts/profiling/deep_context_profile.md ]; then \
	echo "โ
Generated: artifacts/profiling/deep_context_profile.md"; \
	echo "๐ File size: $$(ls -lh artifacts/profiling/deep_context_profile.md | awk '{print $$5}')"; \
	echo "๐ Line count: $$(wc -l < artifacts/profiling/deep_context_profile.md) lines"; \
	echo "๐ Word count: $$(wc -w < artifacts/profiling/deep_context_profile.md) words"; \
	echo ""; \
	echo "๐ Content Analysis:"; \
	grep -c "\[complexity:" artifacts/profiling/deep_context_profile.md > /tmp/complexity_count 2>/dev/null || echo "0" > /tmp/complexity_count; \
	grep -c "\[cognitive:" artifacts/profiling/deep_context_profile.md > /tmp/cognitive_count 2>/dev/null || echo "0" > /tmp/cognitive_count; \
	grep -c "\[big-o:" artifacts/profiling/deep_context_profile.md > /tmp/bigo_count 2>/dev/null || echo "0" > /tmp/bigo_count; \
	grep -c "\[provability:" artifacts/profiling/deep_context_profile.md > /tmp/provability_count 2>/dev/null || echo "0" > /tmp/provability_count; \
	grep -c "\[churn:" artifacts/profiling/deep_context_profile.md > /tmp/churn_count 2>/dev/null || echo "0" > /tmp/churn_count; \
	echo " - Complexity annotations: $$(cat /tmp/complexity_count)"; \
	echo " - Cognitive annotations: $$(cat /tmp/cognitive_count)"; \
	echo " - Big-O annotations: $$(cat /tmp/bigo_count)"; \
	echo " - Provability annotations: $$(cat /tmp/provability_count)"; \
	echo " - Churn annotations: $$(cat /tmp/churn_count)"; \
	rm -f /tmp/*_count || true; \
	else \
	echo "โ Failed to generate deep_context_profile.md"; \
	fi
	@echo ""
	@echo "๐ Profiling artifacts saved in artifacts/profiling/"
	@echo "โ
Deep context profiling complete!"
# Memory usage analysis
# Requires GNU time at /usr/bin/time for `-v` (verbose resource stats);
# degrades to an install hint when it is absent.
analyze-memory-usage: release
	@echo "๐ง Analyzing binary memory usage patterns..."
	@mkdir -p artifacts/profiling
	@echo "๐ Running memory-intensive operations with monitoring..."
	@if command -v /usr/bin/time >/dev/null 2>&1; then \
	echo "Context generation memory usage:"; \
	/usr/bin/time -v ./target/release/pmat context --format json --output /tmp/memory_test.json 2> artifacts/profiling/memory-context.txt; \
	echo "Complexity analysis memory usage:"; \
	/usr/bin/time -v ./target/release/pmat analyze complexity --top-files 20 --format json 2> artifacts/profiling/memory-complexity.txt; \
	echo "Memory usage reports saved to artifacts/profiling/memory-*.txt"; \
	rm -f /tmp/memory_test.json || true; \
	else \
	echo "โ ๏ธ GNU time not available for detailed memory analysis"; \
	echo "Install with: sudo apt-get install time"; \
	fi
# Scaling analysis - test with different project sizes
# Runs the same complexity analysis at three --top-files sizes and compares
# wall time (shell `time`) and output size.
analyze-scaling: release
	@echo "๐ Analyzing binary scaling characteristics..."
	@mkdir -p artifacts/scaling
	@echo "๐ Testing performance with different file counts..."
	@echo "Small scope (top 3 files):"
	@time ./target/release/pmat analyze complexity --top-files 3 --format json > artifacts/scaling/small-scope.json
	@echo ""
	@echo "Medium scope (top 10 files):"
	@time ./target/release/pmat analyze complexity --top-files 10 --format json > artifacts/scaling/medium-scope.json
	@echo ""
	@echo "Large scope (top 25 files):"
	@time ./target/release/pmat analyze complexity --top-files 25 --format json > artifacts/scaling/large-scope.json
	@echo ""
	@echo "๐ Comparing output sizes:"
	@echo "Small scope: $$(wc -c < artifacts/scaling/small-scope.json) bytes"
	@echo "Medium scope: $$(wc -c < artifacts/scaling/medium-scope.json) bytes"
	@echo "Large scope: $$(wc -c < artifacts/scaling/large-scope.json) bytes"
	@echo "โ
Scaling analysis complete!"
# =============================================================================
# Specification Implementation Targets
# =============================================================================
# Mermaid Specification Testing Targets
# Verifies Deno is available for the TypeScript-based validator.
# BUG FIX: uses POSIX `>/dev/null 2>&1` instead of the bash-only `&> /dev/null`,
# which under make's default /bin/sh parses as a background `&` plus a redirect
# and makes the availability check unreliable.
setup-mermaid-validator:
	@echo "๐ง Setting up Mermaid specification validator..."
	@if ! command -v deno >/dev/null 2>&1; then \
	echo "Error: Deno is required but not installed"; \
	echo "Visit https://deno.land to install"; \
	exit 1; \
	fi
	@echo "โ
Deno validator ready"
# Run Mermaid specification compliance tests
# PROPTEST_CASES=2 caps property-test iterations to keep the run fast.
test-mermaid-spec: setup-mermaid-validator
	@echo "๐งช Running Mermaid specification compliance tests..."
	PROPTEST_CASES=2 cargo test mermaid_spec_compliance --features mermaid-spec-tests -- --nocapture
# Validate all generated Mermaid artifacts
# Soft dependency on artifacts/mermaid/: prints a hint rather than failing
# when the directory has not been generated yet.
validate-mermaid-artifacts: setup-mermaid-validator
	@echo "๐ Validating all Mermaid artifacts against spec..."
	@if [ -d "artifacts/mermaid" ]; then \
	deno run --allow-read scripts/mermaid-validator.ts artifacts/mermaid/; \
	else \
	echo "โ ๏ธ No artifacts/mermaid directory found. Run 'make generate-artifacts' first."; \
	fi
# Generate compliance report for Mermaid diagrams
# BUG FIX: the report was written to ../mermaid-compliance.txt (outside the
# workspace) while the success message here and `clean-mermaid-validator`
# both refer to ./mermaid-compliance.txt — write to the cwd so they agree.
# `|| true` keeps this target non-failing; the report captures any failures.
mermaid-compliance-report: setup-mermaid-validator
	@echo "๐ Generating Mermaid compliance report..."
	cargo test mermaid_spec_compliance --features mermaid-spec-tests -- --nocapture > mermaid-compliance.txt 2>&1 || true
	@echo "Report saved to mermaid-compliance.txt"
# Deterministic Artifact Generation Targets
# NOTE(review): output goes to ../artifacts/ (one level above the cwd), but
# validate-mermaid-artifacts looks for ./artifacts/mermaid — this looks like a
# leftover from when this recipe lived in a subdirectory Makefile; confirm the
# intended output path.
generate-artifacts:
	@echo "๐ฏ Generating deterministic artifacts..."
	cargo run --release -- generate-artifacts --output ../artifacts/ --deterministic
# Test deterministic generation (multiple runs should be identical)
# PROPTEST_CASES=2 caps property-test iterations to keep the run fast.
test-determinism:
	@echo "๐ฌ Testing artifact generation determinism..."
	PROPTEST_CASES=2 cargo test determinism_tests -- --nocapture
# Verify artifact integrity using stored hashes
# NOTE(review): `--path ../artifacts/` pairs with generate-artifacts' ../ output
# but disagrees with validate-mermaid-artifacts' ./artifacts — confirm which
# directory is authoritative.
verify-artifacts:
	@echo "๐ Verifying artifact integrity..."
	cargo run --release -- verify-artifacts --path ../artifacts/
# SATD (Self-Admitted Technical Debt) Analysis Targets using built binary
# All four targets below depend on `release` so ./target/release/pmat exists,
# and each writes its report to the cwd.
analyze-satd: release
	@echo "๐ Analyzing Self-Admitted Technical Debt using built binary..."
	@./target/release/pmat analyze satd --format json --output satd-analysis.json
	@echo "โ
SATD analysis complete! Report saved to satd-analysis.json"
# Analyze SATD with evolution tracking
# Tracks how technical-debt markers changed over the last 90 days.
analyze-satd-evolution: release
	@echo "๐ Analyzing SATD evolution over time using built binary..."
	@./target/release/pmat analyze satd --evolution --days 90 --format json --output satd-evolution.json
	@echo "โ
SATD evolution analysis complete! Report saved to satd-evolution.json"
# Export critical SATD items in SARIF format
# SARIF output is suitable for code-scanning tool ingestion.
export-critical-satd: release
	@echo "โ ๏ธ Exporting critical technical debt items using built binary..."
	@./target/release/pmat analyze satd --severity critical --format sarif --output critical-debt.sarif
	@echo "โ
Critical SATD export complete! Report saved to critical-debt.sarif"
# Generate comprehensive SATD metrics
satd-metrics: release
	@echo "๐ Generating comprehensive SATD metrics using built binary..."
	@./target/release/pmat analyze satd --metrics --format json --output satd-metrics.json
	@echo "โ
SATD metrics analysis complete! Report saved to satd-metrics.json"
# Clean up validation artifacts
# Removes the compliance report from the cwd. NOTE(review):
# mermaid-compliance-report currently writes to ../mermaid-compliance.txt,
# which this does not remove — confirm the intended report location.
clean-mermaid-validator:
	@echo "๐งน Cleaning Mermaid validator artifacts..."
	@rm -f mermaid-compliance.txt
# Comprehensive validation of all specifications
# Aggregate target: runs the three spec suites as prerequisites, then prints a summary.
validate-all-specs: test-mermaid-spec test-determinism analyze-satd
	@echo "โ
All specification implementations validated!"
	@echo " โ Mermaid specification compliance"
	@echo " โ Deterministic artifact generation"
	@echo " โ SATD detection and classification"
# Performance testing for all specifications
# `--ignored` runs tests excluded from the normal suite (slow benchmarks).
benchmark-specs:
	@echo "โก Running specification performance benchmarks..."
	cargo test --release test_validation_performance --ignored -- --nocapture
	cargo test --release test_artifact_generation_determinism --ignored -- --nocapture
# =============================================================================
# KAIZEN - Toyota Way Continuous Improvement
# =============================================================================
# Continuous improvement via Toyota Way principles
# Four-step quality pipeline: (1) capture metrics, (2) lint + tests +
# complexity gates (each failing the target on violation), (3) type check,
# (4) best-effort docs update. Sub-steps delegate via $(MAKE) so -j/-n propagate.
kaizen: release ## Toyota Way continuous improvement - comprehensive quality gates
	@echo "=== KAIZEN: ๆนๅ - Toyota Way for Claude Code ==="
	@echo "Jidoka (่ชๅๅ): Build quality in through automated verification"
	@echo "Genchi Genbutsu (็พๅฐ็พ็ฉ): Analyze actual code metrics, not estimates"
	@echo "Hansei (ๅ็): Fix existing defects before adding features"
	@echo "Muda/Muri/Mura: Eliminate waste, overburden, and unevenness"
	@echo ""
	@echo "=== STEP 1: Genchi Genbutsu - Measure Reality ==="
	@mkdir -p artifacts/kaizen
	@./target/release/pmat context --format json --output artifacts/kaizen/kaizen-metrics.json
	@echo "๐ Reality Check Complete - Metrics captured in artifacts/kaizen/kaizen-metrics.json"
	@echo ""
	@echo "=== STEP 2: Jidoka - Quality Gates ==="
	@echo "๐ Linting (Zero tolerance for warnings)..."
	@$(MAKE) lint || (echo "โ Lint failed - fix before proceeding" && exit 1)
	@echo "โ
Linting passed"
	@echo ""
	@echo "๐งช Testing (Zero tolerance for failures)..."
	@$(MAKE) test-fast || (echo "โ Tests failed - regression detected" && exit 1)
	@echo "โ
Tests passed"
	@echo ""
	@echo "๐งฎ Complexity Analysis (Zero tolerance for violations)..."
	@./target/release/pmat analyze complexity --max-cyclomatic 10 --max-cognitive 15 || (echo "โ Complexity violations detected" && exit 1)
	@echo "โ
Complexity within limits"
	@echo ""
	@echo "=== STEP 3: Poka-Yoke - Error Proofing ==="
	@$(MAKE) check || (echo "โ Type checking failed" && exit 1)
	@echo "โ
Type checking passed"
	@echo ""
	@echo "=== STEP 4: Yokoten - Knowledge Sharing ==="
# Docs update is best-effort — `|| true` keeps it from failing the kaizen run.
	@$(MAKE) update-rust-docs || true
	@echo "๐ Documentation updated"
	@echo ""
	@echo "โ
KAIZEN COMPLETE! All quality gates passed."
	@echo "๐ Metrics saved to artifacts/kaizen/kaizen-metrics.json"
	@echo "๐ฏ Zero defects, zero waste, continuous improvement achieved."
# Advanced dogfooding - test all our bug fixes on our own codebase
# Exercises the fixes for issues #29-#34 against this repository. Some steps
# use `|| echo ...` because a non-zero exit is the EXPECTED outcome (the fix
# is that enforcement now fails correctly); others use `|| (... && exit 1)`
# because they must pass.
dogfood-all: release
	@echo "๐ COMPREHENSIVE DOGFOODING - Testing all fixes on our own codebase"
	@echo ""
	@echo "=== Issue #30 & #31: Quality Gate with Check Display and Performance Metrics ==="
	@./target/release/pmat quality-gate --perf --max-complexity-p99 20 || (echo "โ Quality gate failed" && exit 1)
	@echo "โ
Quality gate passed with check display and performance metrics"
	@echo ""
	@echo "=== Issue #32: Custom Complexity Thresholds ==="
	@./target/release/pmat analyze complexity --max-cyclomatic 15 --max-cognitive 20 --top-files 10
	@echo "โ
Custom complexity thresholds working correctly"
	@echo ""
	@echo "=== Issue #33: Deep Context Complexity Analysis ==="
	@./target/release/pmat analyze deep-context --format summary --top-files 5
	@echo "โ
Deep context now shows accurate complexity values (not fixed at 1.0)"
	@echo ""
	@echo "=== Issue #34: Lint Hotspot with Enforcement ==="
	@./target/release/pmat analyze lint-hotspot --enforce --top-files 5 || echo "๐ฏ Enforcement triggered as expected (violations found)"
	@echo "โ
Enforcement flag now properly affects exit status"
	@echo ""
	@echo "=== Issue #29: Quality Gate Violation Detection ==="
	@./target/release/pmat quality-gate --fail-on-violation --max-complexity-p99 5 || echo "๐ฏ Quality gate correctly detected violations"
	@echo "โ
Quality gate now properly detects violations"
	@echo ""
	@echo "๐ All fixes successfully dogfooded on our own codebase!"
# Enforcement mode for strict CI - will fail build on violations
# Unlike dogfood-all, every check here is fatal: any quality-gate or lint
# violation exits non-zero and fails the build.
dogfood-enforce: release
	@echo "๐จ ENFORCEMENT MODE - Strict quality enforcement using all fixes"
	@echo "โ ๏ธ This will fail the build if quality violations are found"
	@echo ""
	@./target/release/pmat quality-gate --fail-on-violation --perf --max-complexity-p99 15 || (echo "โ Quality gate enforcement failed" && exit 1)
	@./target/release/pmat analyze lint-hotspot --enforce --max-density 0.1 || (echo "โ Lint enforcement failed" && exit 1)
	@echo "โ
All enforcement checks passed - zero violations detected"
# Phony declarations for the specification/kaizen/dogfood targets defined above.
.PHONY: setup-mermaid-validator test-mermaid-spec validate-mermaid-artifacts mermaid-compliance-report generate-artifacts test-determinism verify-artifacts analyze-satd analyze-satd-evolution export-critical-satd satd-metrics clean-mermaid-validator validate-all-specs benchmark-specs kaizen dogfood-all dogfood-ci dogfood-enforce
# Context generation optimized for server source
# NOTE(review): the ../../ paths (binary and output) assume this recipe runs
# two directory levels below the workspace root, yet the sibling `context`
# target invokes target/release/pmat directly — one of the two path schemes is
# likely stale; verify against the actual repository layout.
context-fast: release
	@echo '๐ Generating context for server source code (fast)...'
	@cd src && ../../target/release/pmat context --format markdown --output ../../deep_context.md
	@echo 'โ
Context generated: deep_context.md'
	@echo '๐ File size:' && ls -lh deep_context.md | awk '{print $$5}'
# Benchmark context generation on src/ with hyperfine, then summarize via jq.
# NOTE(review): the ../../ binary path inside the benchmarked command assumes a
# subdirectory layout that disagrees with the `context` target — verify.
context-benchmark: release
	@echo 'โก Benchmarking context generation...'
	@mkdir -p artifacts
	@echo 'Testing on src directory:'
	@hyperfine --warmup 2 --min-runs 5 \
	"cd src && ../../target/release/pmat context --format json > /tmp/ctx.json" \
	--export-json artifacts/context-benchmark.json
	@echo 'Performance results:'
	@jq -r '.results[0] | "Mean: \(.mean)s, Min: \(.min)s, Max: \(.max)s"' artifacts/context-benchmark.json
# Context generation (optimized for large codebases)
# Writes a Markdown deep-context report to ./deep_context.md using the release binary.
context: release
	@echo '๐ Generating context for source code...'
	@target/release/pmat context --format markdown --output deep_context.md
	@echo 'โ
Context generated: deep_context.md'
	@echo '๐ File size:' && ls -lh deep_context.md | awk '{print $$5}'
# Same as `context` but emits machine-readable JSON (deep_context.json).
context-json: release
	@echo '๐ Generating JSON context for source code...'
	@target/release/pmat context --format json --output deep_context.json
	@echo 'โ Context generated: deep_context.json'
	@echo '๐ File size:' && ls -lh deep_context.json | awk '{print $$5}'
# =============================================================================
# Overnight Autonomous Refactoring System
# =============================================================================
# Start overnight autonomous improvement with state machine
#
# Flow: clear swap, print an SATD baseline, then either delegate to
# scripts/run-overnight-repair.sh (if present) or launch `pmat refactor serve`
# in the background via nohup. The background PID is recorded in
# .refactor_state/refactor.pid so overnight-monitor and the SIGUSR1 stop hint
# can find it. Output is teed to improve_overnight.log.
# NOTE(review): the whole launch is one backslash-continued shell command, so
# $$REFACTOR_PID stays in scope across the echo lines; --max-runtime 43200
# caps the run at 12 hours.
overnight-improve: release
	@echo "๐ Starting Overnight Autonomous Improvement System..."
	@echo "โฑ๏ธ This will run for 8-12 hours, applying automated fixes"
	@echo ""
	@echo "๐ Pre-flight checks..."
	@$(MAKE) clear-swap
	@echo ""
	@echo "๐ Current code quality baseline:"
	@./target/release/pmat analyze satd --format human | head -20 || true
	@echo ""
	@echo "๐ Launching improvement state machine..."
	@mkdir -p .refactor_state docs/bugs artifacts/refactor
	@if [ -f "./scripts/run-overnight-repair.sh" ]; then \
		echo "Using run-overnight-repair.sh script..."; \
		bash ./scripts/run-overnight-repair.sh; \
	else \
		echo "Creating and running overnight improvement configuration..."; \
		nohup ./target/release/pmat refactor serve \
			--refactor-mode batch \
			--config refactor-config.json \
			--project . \
			--parallel 8 \
			--memory-limit 16384 \
			--batch-size 50 \
			--checkpoint-dir .refactor_state \
			--resume \
			--auto-commit "improvement: automated enhancement via state machine [skip ci]" \
			--max-runtime 43200 \
			2>&1 | tee improve_overnight.log & \
		REFACTOR_PID=$$!; \
		echo "$$REFACTOR_PID" > .refactor_state/refactor.pid; \
		echo ""; \
		echo "โ Improvement started with PID: $$REFACTOR_PID"; \
		echo "๐ Log file: improve_overnight.log"; \
		echo "๐ Monitor with: make overnight-monitor"; \
		echo "๐ Stop safely with: kill -SIGUSR1 $$REFACTOR_PID"; \
	fi
# Monitor overnight refactoring progress
# Reports process liveness (via the PID file written by overnight-improve),
# memory/swap pressure, recent log activity, and simple event counters.
# NOTE(review): the original tailed refactor_overnight.log, but
# overnight-improve tees its output to improve_overnight.log — so the monitor
# always reported "no log file". File name corrected to match the producer.
overnight-monitor:
	@echo "๐ Overnight Refactoring Monitor"
	@echo "================================"
	@if [ -f ".refactor_state/refactor.pid" ]; then \
		PID=$$(cat .refactor_state/refactor.pid); \
		if ps -p $$PID > /dev/null 2>&1; then \
			echo "โ Refactoring running (PID: $$PID)"; \
		else \
			echo "โ Refactoring not running (PID $$PID not found)"; \
		fi; \
	else \
		echo "โ No refactoring process found"; \
	fi
	@echo ""
	@echo "๐ Memory and Swap Status:"
	@free -h | grep -E "Mem:|Swap:"
	@echo ""
	@if [ -f "improve_overnight.log" ]; then \
		echo "๐ Recent Activity (last 20 lines):"; \
		tail -20 improve_overnight.log | grep -E "STATE:|FIXED:|ERROR:|WARNING:" || tail -20 improve_overnight.log; \
		echo ""; \
		echo "๐ Statistics:"; \
		echo " States: $$(grep -c "STATE:" improve_overnight.log 2>/dev/null || echo 0)"; \
		echo " Fixed: $$(grep -c "FIXED:" improve_overnight.log 2>/dev/null || echo 0)"; \
		echo " Errors: $$(grep -c "ERROR:" improve_overnight.log 2>/dev/null || echo 0)"; \
		echo " Warnings: $$(grep -c "WARNING:" improve_overnight.log 2>/dev/null || echo 0)"; \
	else \
		echo "โ ๏ธ No log file found yet"; \
	fi
	@echo ""
	@echo "๐ก Commands:"
	@echo " View full log: tail -f improve_overnight.log"
	@echo " Clear swap if needed: make clear-swap"
	@echo " Stop safely: kill -SIGUSR1 $$(cat .refactor_state/refactor.pid 2>/dev/null || echo '<PID>')"
# Set up cron job for periodic swap clearing during overnight runs
# Does NOT modify the crontab itself — it only prints the exact entry
# (every 30 minutes, threshold 50%) plus the command to append it, so the
# user installs it deliberately. $(CURDIR) anchors the script path because
# cron jobs start in $HOME.
overnight-swap-cron:
	@echo "โฐ Setting up periodic swap clearing for overnight refactoring..."
	@CRON_CMD="cd $(CURDIR) && ./scripts/clear-swap-periodic.sh --threshold 50 --log .refactor_state/swap-clear.log"; \
	CRON_ENTRY="*/30 * * * * $$CRON_CMD"; \
	echo ""; \
	echo "๐ Cron entry to add:"; \
	echo "$$CRON_ENTRY"; \
	echo ""; \
	echo "To install, run:"; \
	echo " 1. crontab -e"; \
	echo " 2. Add the line above"; \
	echo " 3. Save and exit"; \
	echo ""; \
	echo "Or run this command to append it:"; \
	echo " (crontab -l 2>/dev/null; echo \"$$CRON_ENTRY\") | crontab -"; \
	echo ""; \
	echo "๐ This will:"; \
	echo " - Check swap usage every 30 minutes"; \
	echo " - Clear swap if usage exceeds 50%"; \
	echo " - Only act if overnight refactor is running"; \
	echo " - Log actions to .refactor_state/swap-clear.log"
# =============================================================================
# Toyota Way Quality-Enforced Development Targets (ruchy-inspired)
# =============================================================================
# Development with quality checks (Toyota Way Genchi Genbutsu)
# Runs a non-fatal quality-gate pass using the debug binary (building it
# first if absent), then shows documentation-sync status.
# NOTE(review): recursive invocation changed from literal `make` to $(MAKE)
# so -j/-n and the jobserver propagate correctly.
dev:
	@echo "๐ฏ Toyota Way Development - Starting with quality checks..."
	@echo "๐ Checking current quality status..."
	@if [ -f "./target/debug/pmat" ]; then \
		echo "Running quality gate analysis..."; \
		./target/debug/pmat quality-gate || echo "โ ๏ธ Quality gate warnings found"; \
	else \
		echo "Building PMAT for quality analysis..."; \
		$(MAKE) build; \
	fi
	@echo ""
	@echo "๐ Documentation synchronization status:"
	@ls -la docs/execution/ 2>/dev/null || echo " ๐ Run './scripts/setup-quality.sh' to initialize"
	@echo ""
	@echo "โ Ready for Toyota Way development!"
	@echo " ๐ฏ Remember: Documentation MUST be updated with code changes"
	@echo " ๐ง Use 'make commit' for quality-enforced commits"
# Quality-enforced commit (Toyota Way Jidoka)
# Refuses to run with nothing staged, runs the pre-commit hook (falling back
# to `$(MAKE) validate` if the hook is missing), then prompts interactively
# for a commit message and commits. The prompt/read/commit sequence is one
# backslash-continued command so $$MSG stays in scope.
# NOTE(review): recursive `make validate` changed to $(MAKE) validate for
# flag/jobserver propagation, and `read -p` (a bashism that fails under
# POSIX sh) replaced with a portable printf + read pair — behavior under
# bash is unchanged.
commit:
	@echo "๐ง Toyota Way Quality-Enforced Commit (Jidoka)..."
	@echo ""
	@echo "๐ Checking for staged changes..."
	@if [ -z "$$(git diff --cached --name-only)" ]; then \
		echo "โ No staged changes found!"; \
		echo " Stage your changes first: git add <files>"; \
		exit 1; \
	fi
	@echo "๐ Staged files:"
	@git diff --cached --name-only | sed 's/^/ โ /'
	@echo ""
	@echo "๐ Running pre-commit quality gates..."
	@if [ -x ".git/hooks/pre-commit" ]; then \
		.git/hooks/pre-commit; \
	else \
		echo "โ ๏ธ Pre-commit hook not found - run './scripts/setup-quality.sh'"; \
		echo "Continuing with basic validation..."; \
		$(MAKE) validate; \
	fi
	@echo ""
	@echo "๐ฌ Please provide commit message (PMAT-XXXX format recommended):"
	@printf "Commit message: "; read MSG; \
	if [ -z "$$MSG" ]; then \
		echo "โ Commit message cannot be empty"; \
		exit 1; \
	fi; \
	git commit -m "$$MSG" || exit 1; \
	echo ""; \
	echo "โ Quality-enforced commit completed!"; \
	echo " ๐ฏ Toyota Way: Quality built-in at source"
# Sprint quality verification (Toyota Way Kaizen)
# Gate for closing a sprint: validate, check that roadmap/quality-gates docs
# exist (hard failure if missing), run the full test suite, run a strict
# quality gate with the debug binary (building it if absent), and remind
# about velocity tracking. Any failed gate aborts with exit 1.
# NOTE(review): recursive invocations changed from literal `make` to $(MAKE)
# so -j/-n and the jobserver propagate correctly.
sprint-close:
	@echo "๐ Sprint Quality Verification (Toyota Way Kaizen)..."
	@echo ""
	@echo "๐ Running comprehensive quality analysis..."
	@$(MAKE) validate
	@echo ""
	@echo "๐ Checking documentation synchronization..."
	@if [ -f "docs/execution/roadmap.md" ]; then \
		echo "โ Roadmap documentation found"; \
		if grep -q "โ COMPLETED" docs/execution/roadmap.md; then \
			echo "โ Completed tasks found in roadmap"; \
		else \
			echo "โ ๏ธ No completed tasks marked in roadmap"; \
		fi; \
	else \
		echo "โ Roadmap documentation missing"; \
		echo " Run './scripts/setup-quality.sh' to initialize"; \
		exit 1; \
	fi
	@if [ -f "docs/execution/quality-gates.md" ]; then \
		echo "โ Quality gates documentation found"; \
	else \
		echo "โ Quality gates documentation missing"; \
		exit 1; \
	fi
	@echo ""
	@echo "๐งช Running full test suite..."
	@$(MAKE) test-all || (echo "โ Tests failed - cannot close sprint" && exit 1)
	@echo ""
	@echo "๐ง Running quality gate analysis..."
	@if [ -f "./target/debug/pmat" ]; then \
		./target/debug/pmat quality-gate --strict || (echo "โ Quality gates failed" && exit 1); \
	else \
		echo "โ ๏ธ PMAT binary not found, building..."; \
		$(MAKE) build && ./target/debug/pmat quality-gate --strict; \
	fi
	@echo ""
	@echo "๐ Updating velocity tracking..."
	@if [ -f "docs/execution/velocity.json" ]; then \
		echo "โ Velocity data found"; \
		echo " ๐ Consider updating completed tasks and metrics"; \
	fi
	@echo ""
	@echo "โ Sprint quality verification PASSED!"
	@echo " ๐ฏ Toyota Way: Continuous improvement achieved"
	@echo " ๐ All quality gates met"
	@echo " ๐ Documentation synchronized"
	@echo " ๐งช All tests passing"
	@echo " ๐ง Zero quality violations"
	@echo ""
	@echo "๐ Ready for sprint completion and release!"
# Setup quality enforcement (one-time)
# Thin wrapper: delegates to scripts/setup-quality.sh if it exists and is
# executable, otherwise fails with guidance.
setup-quality:
	@echo "๐ง Setting up Toyota Way quality enforcement..."
	@if [ -x "./scripts/setup-quality.sh" ]; then \
		./scripts/setup-quality.sh; \
	else \
		echo "โ setup-quality.sh script not found or not executable"; \
		echo " Ensure scripts/setup-quality.sh exists and is executable"; \
		exit 1; \
	fi
# Quality gate with documentation sync check
# Three stages: strict PMAT quality gate (building the debug binary if
# absent), documentation-structure check (hard failure if roadmap or
# quality-gates docs are missing), then the workspace validate target.
# NOTE(review): recursive invocations changed from literal `make` to $(MAKE)
# so -j/-n and the jobserver propagate correctly.
quality-gate-full:
	@echo "๐ Comprehensive Quality Gate Analysis..."
	@echo ""
	@echo "1๏ธโฃ Running PMAT quality analysis..."
	@if [ -f "./target/debug/pmat" ]; then \
		./target/debug/pmat quality-gate --strict; \
	else \
		echo "Building PMAT first..."; \
		$(MAKE) build && ./target/debug/pmat quality-gate --strict; \
	fi
	@echo ""
	@echo "2๏ธโฃ Checking documentation synchronization..."
	@if [ -f "docs/execution/roadmap.md" ] && [ -f "docs/execution/quality-gates.md" ]; then \
		echo "โ Documentation structure complete"; \
	else \
		echo "โ Documentation structure incomplete"; \
		echo " Run 'make setup-quality' to initialize"; \
		exit 1; \
	fi
	@echo ""
	@echo "3๏ธโฃ Validating Toyota Way standards..."
	@$(MAKE) validate
	@echo ""
	@echo "โ Comprehensive quality gate analysis PASSED!"
# Help for Toyota Way targets
# Pure documentation target: prints usage for the Toyota Way workflow
# (setup-quality, dev, commit, sprint-close, quality-gate-full).
help-toyota-way:
	@echo "๐ฏ Toyota Way Quality-Enforced Development Commands:"
	@echo ""
	@echo "Setup (run once):"
	@echo " make setup-quality - Initialize quality enforcement system"
	@echo ""
	@echo "Development workflow:"
	@echo " make dev - Start development with quality checks"
	@echo " make commit - Create quality-enforced commit"
	@echo " make sprint-close - Verify sprint quality before release"
	@echo ""
	@echo "Quality analysis:"
	@echo " make quality-gate-full - Comprehensive quality gate analysis"
	@echo " pmat quality-gate - Basic quality gate check"
	@echo ""
	@echo "๐ฏ Toyota Way Principles:"
	@echo " - Genchi Genbutsu: Go and see the actual problems"
	@echo " - Jidoka: Automation with human oversight"
	@echo " - Kaizen: Continuous incremental improvement"
	@echo " - Documentation synchronization enforced"
	@echo " - Quality built-in at source"
	@echo ""
## Dependency Reduction Benchmarking
## Pattern: Modeled after trueno-db competitive benchmarking methodology
## Spec: docs/specifications/dependency-reduction-benchmarking-framework.md
# Delegates all measurement to benchmarks/measure-baseline.sh; the `##`
# comment on the rule header feeds the self-documenting help target.
bench-baseline: ## Measure current baseline (build times, binary size, dependencies)
	@echo "๐ Measuring baseline metrics..."
	@echo "๐ Spec: docs/specifications/dependency-reduction-benchmarking-framework.md"
	@./benchmarks/measure-baseline.sh
# Approximates dependency counts by line-counting `cargo tree` output for
# three feature configurations; errors are suppressed so a missing feature
# set just reports 0.
bench-deps: ## Count dependencies across configurations
	@echo "๐ฆ Dependency counts:"
	@echo " Minimal (rust-only): $$(cargo tree --no-default-features --features rust-only 2>/dev/null | wc -l)"
	@echo " Default: $$(cargo tree 2>/dev/null | wc -l)"
	@echo " All features: $$(cargo tree --all-features 2>/dev/null | wc -l)"
# Builds the release binary three times (rust-only, default, all-features)
# and reports the human-readable size after each. Each build overwrites
# target/release/pmat, so the all-features binary is what remains afterwards.
bench-binary-size: ## Measure binary sizes across configurations
	@echo "๐ Measuring binary sizes..."
	@cargo build --release --no-default-features --features rust-only > /dev/null 2>&1
	@echo " Minimal (rust-only): $$(ls -lh target/release/pmat | awk '{print $$5}')"
	@cargo build --release > /dev/null 2>&1
	@echo " Default: $$(ls -lh target/release/pmat | awk '{print $$5}')"
	@cargo build --release --all-features > /dev/null 2>&1
	@echo " All features: $$(ls -lh target/release/pmat | awk '{print $$5}')"
# Cold-build timing: `cargo clean` before each of the three configurations so
# every build starts from scratch (hence the 10-15 minute warning).
# NOTE(review): `time` here relies on the recipe shell providing it (builtin
# in bash; dash has none, falling back to /usr/bin/time) — confirm the
# Makefile's SHELL setting if timings are missing.
bench-build-times: ## Measure build times across configurations (takes ~10-15 minutes)
	@echo "โฑ๏ธ Benchmarking build times (this will take 10-15 minutes)..."
	@echo " Testing: minimal (rust-only)"
	@cargo clean > /dev/null 2>&1
	@time cargo build --release --no-default-features --features rust-only
	@echo " Testing: default"
	@cargo clean > /dev/null 2>&1
	@time cargo build --release
	@echo " Testing: all-features"
	@cargo clean > /dev/null 2>&1
	@time cargo build --release --all-features
	@echo "โ Build time benchmarks complete"
# Aggregate: runs the two fast benchmarks (dependency counts + binary sizes)
# as prerequisites, then prints a completion banner.
bench-quick: bench-deps bench-binary-size ## Quick benchmark (deps + binary size, ~1-2 minutes)
	@echo "โ Quick benchmarks complete"
# Runtime performance budgets: defines an inline shell function `bench NAME
# BUDGET CMD...` that times one pmat invocation in milliseconds (date +%s%N)
# and prints pass/fail against the budget; any over-budget run sets FAIL and
# the target exits 1 at the end. The whole recipe is one continued command so
# FAIL and bench() stay in scope.
# NOTE(review): `local` and the `bench()` function definition are not
# guaranteed under strict POSIX sh — this recipe assumes a bash-like SHELL;
# confirm the Makefile's SHELL setting. Also note it invokes `pmat` from
# PATH, unlike sibling targets that run ./target/release/pmat — so it
# measures the *installed* binary.
bench-perf: ## Runtime performance benchmark (~90s) โ checks against baseline
	@echo "โฑ๏ธ Running performance benchmarks (18 operations)..."
	@FAIL=0; \
	bench() { \
		local NAME=$$1; shift; local BUDGET=$$1; shift; \
		local START=$$(date +%s%N); \
		eval "$$@" 2>/dev/null >/dev/null; \
		local END=$$(date +%s%N); \
		local MS=$$(( (END - START) / 1000000 )); \
		if [ $$MS -gt $$BUDGET ]; then \
			printf " โ %-30s %5dms (budget: %dms)\n" "$$NAME" $$MS $$BUDGET; \
			FAIL=1; \
		else \
			printf " โ %-30s %5dms\n" "$$NAME" $$MS; \
		fi; \
	}; \
	bench "query (semantic)" 500 "pmat query test --limit 1"; \
	bench "query (literal)" 500 "pmat query --literal .unwrap --limit 1"; \
	bench "query (regex)" 500 "pmat query --regex 'fn.new' --limit 1"; \
	bench "query (coverage-gaps)" 500 "pmat query --coverage-gaps --limit 1"; \
	bench "rust-project-score" 5000 "pmat rust-project-score"; \
	bench "analyze complexity" 5000 "pmat analyze complexity --path ."; \
	bench "analyze satd" 5000 "pmat analyze satd --path ."; \
	bench "five-whys" 5000 "pmat five-whys test --depth 1"; \
	bench "explain" 100 "pmat explain cb-200"; \
	bench "doctor" 100 "pmat doctor"; \
	bench "proj-diag" 5000 "pmat proj-diag"; \
	if [ $$FAIL -eq 1 ]; then \
		echo ""; echo "โ Some benchmarks exceeded budget"; exit 1; \
	else \
		echo ""; echo "โ All benchmarks within budget"; \
	fi
# Full benchmark entry point; bench-baseline (and its script) produces the
# files under benchmarks/results/ that the hints below refer to.
bench-all: bench-baseline ## Run all dependency reduction benchmarks
	@echo "โ All benchmarks complete"
	@echo "๐ Results in benchmarks/results/"
	@echo "๐ Review latest: ls -lt benchmarks/results/ | head -2"
## PMAT Integration (Dogfooding O(1) Quality Gates)
# Regenerates deep_context.md, then cross-checks README.md / CLAUDE.md /
# AGENT.md against it, failing on any contradiction (hallucination).
# NOTE(review): the guard checks `which pmat` (the installed binary) but the
# actual work runs via `cargo run --release --bin pmat`, so the installed
# copy is never executed — confirm whether the guard is intentional.
.PHONY: pmat-validate-docs
pmat-validate-docs: ## Validate documentation accuracy (hallucination detection - Phase 3.5)
	@echo "๐ Validating documentation accuracy (Phase 3.5)..."
	@which pmat > /dev/null 2>&1 || { echo "โ PMAT not found! Install with: cargo install --path server"; exit 1; }
	@cargo run --release --bin pmat -- context --output deep_context.md --format llm-optimized
	@cargo run --release --bin pmat -- validate-readme \
		--targets README.md CLAUDE.md AGENT.md \
		--deep-context deep_context.md \
		--fail-on-contradiction \
		--verbose || { \
		echo "โ Documentation validation failed!"; \
		exit 1; \
	}
	@echo "โ Documentation validation complete - zero hallucinations!"
# Dogfooding quality gate: runs pmat's own quality-gate with metrics and TDG
# checks via `cargo run` (the `which pmat` guard only verifies an installed
# copy exists — NOTE(review): the installed copy is not the one executed).
.PHONY: pmat-quality-gate
pmat-quality-gate: ## Run PMAT quality gates (O(1) validation)
	@echo "๐ Running PMAT quality gates (dogfooding)..."
	@which pmat > /dev/null 2>&1 || { echo "โ PMAT not found! Install with: cargo install --path server"; exit 1; }
	@cargo run --release --bin pmat -- quality-gate --check-metrics --check-tdg
	@echo "โ PMAT quality gates passed!"
# Dogfooding Rust Project Score: self-assessment run through `cargo run`
# (same which-guard caveat as the other pmat-* targets).
.PHONY: pmat-rust-score
pmat-rust-score: ## Run Rust Project Score assessment (dogfooding)
	@echo "๐ฆ Running Rust Project Score assessment (dogfooding)..."
	@which pmat > /dev/null 2>&1 || { echo "โ PMAT not found! Install with: cargo install --path server"; exit 1; }
	@cargo run --release --bin pmat -- rust-project-score --verbose
	@echo "โ Rust Project Score complete!"