# ----------------------------------------------------------------------------
# Graph_D development Makefile — global configuration and variables.
# Run `make help` (the default goal) for a list of available commands.
# ----------------------------------------------------------------------------
.DEFAULT_GOAL := help

# Delete a half-written target file when its recipe fails, so a failed run
# never leaves a corrupt artifact that looks up to date.
.DELETE_ON_ERROR:

# Every command target in this file is phony: declare them all so a stray
# file named e.g. `test` or `clean` cannot silently disable the target.
# (The original list omitted most targets — info, build-release, test-unit,
# the bench family, doc, release, etc.)
.PHONY: help info setup install build build-release build-all check \
        test test-unit test-integration test-memory test-coverage test-watch \
        bench bench-basic bench-scalability bench-memory bench-advanced \
        bench-regression perf-report format format-check lint lint-fix \
        audit security outdated doc doc-open doc-test examples \
        run-memory-example dev ci pre-commit quick-check release \
        release-check package clean deps size git-status commit-stats \
        memory-test perf-profile flamegraph db-test gql-test all full-check \
        fresh build-unsafe test-unsafe bench-unsafe \
        error-setup error-audit error-check error-count error-analyze \
        error-refactor-dry error-validate clean-error-tools

# ANSI colour escapes used by the echo banners throughout this file.
RED := \033[31m
GREEN := \033[32m
YELLOW := \033[33m
BLUE := \033[34m
RESET := \033[0m

PROJECT_NAME := graph_d
# Take only the FIRST `version = "..."` line: without `head -n1` a dependency
# entry written with a line-leading `version` key would corrupt the value.
VERSION := $(shell grep '^version' Cargo.toml | head -n1 | sed 's/.*"\(.*\)".*/\1/')
TARGET_DIR := target
# Self-documenting help: scans all parsed makefiles for targets annotated with
# a trailing `## description` comment and prints them.  The original awk
# pattern used `.*?##`, but non-greedy `?` after `*` is undefined in POSIX
# ERE and only works by accident in some awks; plain `.*##` is equivalent
# here because FS splits on the first `:.*##` anyway.  The class also gains
# 0-9 so numbered targets would be listed.
help: ## Show this help
	@echo "$(BLUE)Graph_D Native Graph Database - Development Commands$(RESET)"
	@echo "===================================================="
	@echo ""
	@echo "$(GREEN)Available targets:$(RESET)"
	@awk 'BEGIN {FS = ":.*##"} /^[a-zA-Z0-9_-]+:.*##/ { printf " $(BLUE)%-20s$(RESET) %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
	@echo ""
	@echo "$(YELLOW)Project Info:$(RESET)"
	@echo " Name: $(PROJECT_NAME)"
	@echo " Version: $(VERSION)"
	@echo " Target: $(TARGET_DIR)"
	@echo ""
info: ## Show project, toolchain, git and dependency information
	@echo "$(BLUE)Project Information$(RESET)"
	@echo "=================="
	@echo "Name: $(PROJECT_NAME)"
	@echo "Version: $(VERSION)"
	@echo "Rust Version: $$(rustc --version)"
	@echo "Cargo Version: $$(cargo --version)"
	@echo ""
	@echo "$(BLUE)Git Status$(RESET)"
	@echo "=========="
	@git status --short || echo "Not a git repository"
	@echo ""
	@echo "$(BLUE)Dependencies$(RESET)"
	@echo "============"
# Only direct dependencies, truncated to the first 10 lines of output.
	@cargo tree --depth 1 | head -10
# NOTE(review): other targets also rely on cargo-watch (test-watch),
# cargo-outdated (outdated) and cargo-flamegraph (flamegraph), which are NOT
# installed here — confirm whether they should be added to this list.
setup: ## Install Rust components and cargo tools needed for development
	@echo "$(BLUE)Setting up development environment...$(RESET)"
	@rustup update stable
	@rustup component add clippy rustfmt
	@cargo install cargo-audit cargo-tarpaulin
	@echo "$(GREEN)Development environment ready!$(RESET)"
install: ## Install the project binary from this workspace
	@echo "$(BLUE)Installing $(PROJECT_NAME)...$(RESET)"
	@cargo install --path .
	@echo "$(GREEN)Installation complete!$(RESET)"
build: ## Build the project in debug mode
	@echo "$(BLUE)Building $(PROJECT_NAME) in debug mode...$(RESET)"
	@cargo build
	@echo "$(GREEN)Debug build complete!$(RESET)"
build-release: ## Build the project in release mode
	@echo "$(BLUE)Building $(PROJECT_NAME) in release mode...$(RESET)"
	@cargo build --release
	@echo "$(GREEN)Release build complete!$(RESET)"
build-all: ## Build all targets (lib, bins, tests, benches, examples)
	@echo "$(BLUE)Building all targets...$(RESET)"
	@cargo build --all-targets
	@echo "$(GREEN)All targets built successfully!$(RESET)"
# Type-checks each target class separately so a failure pinpoints whether the
# library, tests, benches or examples broke.
check: ## Type-check library, tests, benches and examples without building
	@echo "$(BLUE)Checking code compilation...$(RESET)"
	@cargo check
	@cargo check --tests
	@cargo check --benches
	@cargo check --examples
	@echo "$(GREEN)Code check passed!$(RESET)"
test: ## Run the full test suite
	@echo "$(BLUE)Running all tests...$(RESET)"
	@cargo test
	@echo "$(GREEN)All tests passed!$(RESET)"
test-unit: ## Run library unit tests only
	@echo "$(BLUE)Running unit tests...$(RESET)"
	@cargo test --lib
	@echo "$(GREEN)Unit tests passed!$(RESET)"
test-integration: ## Run all integration tests (tests/ directory)
	@echo "$(BLUE)Running integration tests...$(RESET)"
	@cargo test --test '*'
	@echo "$(GREEN)Integration tests passed!$(RESET)"
test-memory: ## Run tests with the unsafe-allocators feature enabled
	@echo "$(BLUE)Running tests with memory management...$(RESET)"
	@cargo test --features unsafe-allocators
	@echo "$(GREEN)Memory management tests passed!$(RESET)"
test-coverage: ## Generate an HTML coverage report (cargo-tarpaulin)
	@echo "$(BLUE)Generating test coverage report...$(RESET)"
	@cargo tarpaulin --out Html --output-dir target/coverage
	@echo "$(GREEN)Coverage report generated in target/coverage/tarpaulin-report.html$(RESET)"
# NOTE(review): requires cargo-watch, which `make setup` does not install — confirm.
test-watch: ## Re-run tests on every file change (needs cargo-watch)
	@echo "$(BLUE)Running tests in watch mode...$(RESET)"
	@cargo watch -x test
# Benchmark family: criterion writes detailed reports under target/criterion.
# Arguments after `--` are filter strings matched against benchmark names.
bench: ## Run all benchmarks
	@echo "$(BLUE)Running all benchmarks...$(RESET)"
	@cargo bench
	@echo "$(GREEN)Benchmarks complete! Check target/criterion for detailed reports.$(RESET)"
bench-basic: ## Run only the basic benchmark group
	@echo "$(BLUE)Running basic benchmarks...$(RESET)"
	@cargo bench -- basic_benches
	@echo "$(GREEN)Basic benchmarks complete!$(RESET)"
bench-scalability: ## Run only the scalability benchmark group
	@echo "$(BLUE)Running scalability benchmarks...$(RESET)"
	@cargo bench -- scalability_benches
	@echo "$(GREEN)Scalability benchmarks complete!$(RESET)"
bench-memory: ## Run only the memory-management benchmark group
	@echo "$(BLUE)Running memory management benchmarks...$(RESET)"
	@cargo bench -- memory_benches
	@echo "$(GREEN)Memory benchmarks complete!$(RESET)"
bench-advanced: ## Run the advanced (slow) benchmark group
	@echo "$(BLUE)Running advanced benchmarks (this may take a while)...$(RESET)"
	@cargo bench -- advanced_benches
	@echo "$(GREEN)Advanced benchmarks complete!$(RESET)"
bench-regression: ## Run performance regression detection benchmarks
	@echo "$(BLUE)Running performance regression detection...$(RESET)"
	@cargo bench -- bench_performance_regression_detection
	@echo "$(GREEN)Regression detection complete!$(RESET)"
# Builds a markdown summary by grepping timing/throughput lines out of the
# benchmark output.  `|| true` keeps the report going when a section produces
# no matching lines (grep exits non-zero on no match).
perf-report: ## Generate PERFORMANCE_REPORT.md from basic and memory benches
	@echo "$(BLUE)Generating performance analysis report...$(RESET)"
	@echo "# Performance Report - Generated $$(date)" > PERFORMANCE_REPORT.md
	@echo "" >> PERFORMANCE_REPORT.md
	@echo "## Basic Benchmarks" >> PERFORMANCE_REPORT.md
	@cargo bench -- basic_benches 2>&1 | grep -E "(time:|throughput:)" | head -10 >> PERFORMANCE_REPORT.md || true
	@echo "" >> PERFORMANCE_REPORT.md
	@echo "## Memory Benchmarks" >> PERFORMANCE_REPORT.md
	@cargo bench -- memory_benches 2>&1 | grep -E "(time:|throughput:)" | head -10 >> PERFORMANCE_REPORT.md || true
	@echo "$(GREEN)Performance report generated: PERFORMANCE_REPORT.md$(RESET)"
format: ## Format all code in place with rustfmt
	@echo "$(BLUE)Formatting code...$(RESET)"
	@cargo fmt
	@echo "$(GREEN)Code formatted!$(RESET)"
format-check: ## Verify formatting without modifying files (CI-safe)
	@echo "$(BLUE)Checking code formatting...$(RESET)"
	@cargo fmt -- --check
	@echo "$(GREEN)Code formatting is correct!$(RESET)"
lint: ## Run clippy, treating all warnings as errors
	@echo "$(BLUE)Running clippy lints...$(RESET)"
	@cargo clippy -- -D warnings
	@echo "$(GREEN)Linting passed!$(RESET)"
lint-fix: ## Auto-apply clippy fixes (even with uncommitted changes)
	@echo "$(BLUE)Fixing clippy warnings...$(RESET)"
	@cargo clippy --fix --allow-dirty --allow-staged
	@echo "$(GREEN)Auto-fixes applied!$(RESET)"
audit: ## Audit dependencies for known security advisories (cargo-audit)
	@echo "$(BLUE)Running security audit...$(RESET)"
	@cargo audit
	@echo "$(GREEN)Security audit complete!$(RESET)"
security: audit ## Alias for 'audit'
# Degrades to an install hint when cargo-outdated is missing instead of failing.
outdated: ## List outdated dependencies (needs cargo-outdated)
	@echo "$(BLUE)Checking for outdated dependencies...$(RESET)"
	@cargo outdated || echo "$(YELLOW)Install cargo-outdated: cargo install cargo-outdated$(RESET)"
doc: ## Generate API documentation (including private items)
	@echo "$(BLUE)Generating documentation...$(RESET)"
	@cargo doc --no-deps --document-private-items
	@echo "$(GREEN)Documentation generated in target/doc/$(RESET)"
doc-open: ## Generate documentation and open it in a browser
	@echo "$(BLUE)Generating and opening documentation...$(RESET)"
	@cargo doc --no-deps --document-private-items --open
doc-test: ## Run documentation (doctest) examples
	@echo "$(BLUE)Running documentation tests...$(RESET)"
	@cargo test --doc
	@echo "$(GREEN)Documentation tests passed!$(RESET)"
examples: ## Build all example binaries
	@echo "$(BLUE)Building examples...$(RESET)"
	@cargo build --examples
	@echo "$(GREEN)Examples built successfully!$(RESET)"
run-memory-example: ## Run the memory_management example
	@echo "$(BLUE)Running memory management example...$(RESET)"
	@cargo run --example memory_management
	@echo "$(GREEN)Memory management example completed!$(RESET)"
dev: format lint check test ## Format, lint, type-check and test in one step
# Uses format-check (not format) so CI never mutates the working tree.
ci: ## Run the full CI pipeline
	@echo "$(BLUE)Running CI pipeline...$(RESET)"
	@$(MAKE) format-check
	@$(MAKE) lint
	@$(MAKE) check
	@$(MAKE) test
	@$(MAKE) bench-basic
	@echo "$(GREEN)CI pipeline completed successfully!$(RESET)"
# Unlike ci, this formats in place and only runs the fast unit tests.
pre-commit: ## Run checks suitable for a git pre-commit hook
	@echo "$(BLUE)Running pre-commit checks...$(RESET)"
	@$(MAKE) format
	@$(MAKE) lint
	@$(MAKE) check
	@$(MAKE) test-unit
	@echo "$(GREEN)Pre-commit checks passed!$(RESET)"
quick-check: ## Fast feedback loop: format, type-check, unit tests
	@echo "$(BLUE)Running quick check...$(RESET)"
	@cargo fmt
	@cargo check
	@cargo test --lib
	@echo "$(GREEN)Quick check completed!$(RESET)"
release: ## Build and strip an optimized release binary
	@echo "$(BLUE)Building optimized release...$(RESET)"
	@cargo build --release
# Best effort: strip may be unavailable, or the crate may build no binary.
	@strip target/release/$(PROJECT_NAME) 2>/dev/null || true
	@echo "$(GREEN)Release build complete! Binary: target/release/$(PROJECT_NAME)$(RESET)"
release-check: ## Run the full pre-release verification pipeline
	@echo "$(BLUE)Running pre-release checks...$(RESET)"
	@$(MAKE) format-check
	@$(MAKE) lint
	@$(MAKE) test
	@$(MAKE) bench-basic
	@$(MAKE) audit
	@$(MAKE) build-release
	@echo "$(GREEN)Release checks passed!$(RESET)"
package: ## Create a distributable crate package (cargo package)
	@echo "$(BLUE)Creating release package...$(RESET)"
	@cargo package
	@echo "$(GREEN)Package created in target/package/$(RESET)"
clean: ## Remove build artifacts, coverage output and generated reports
	@echo "$(BLUE)Cleaning build artifacts...$(RESET)"
	@cargo clean
# cargo clean already removes target/; this also drops the generated report.
	@rm -rf target/coverage target/criterion PERFORMANCE_REPORT.md || true
	@echo "$(GREEN)Clean complete!$(RESET)"
deps: ## Print the full cargo dependency tree
	@echo "$(BLUE)Dependency tree:$(RESET)"
	@cargo tree
size: ## Show the size of the release and debug binaries
	@echo "$(BLUE)Binary size information:$(RESET)"
	@ls -lh target/release/$(PROJECT_NAME) 2>/dev/null || echo "Release binary not found. Run 'make release' first."
	@ls -lh target/debug/$(PROJECT_NAME) 2>/dev/null || echo "Debug binary not found. Run 'make build' first."
git-status: ## Show working-tree status and the last 10 commits
	@echo "$(BLUE)Git Status:$(RESET)"
	@git status --short
	@echo ""
	@echo "$(BLUE)Recent commits:$(RESET)"
	@git log --oneline -10
commit-stats: ## Show the top 10 contributors by commit count
	@echo "$(BLUE)Commit Statistics:$(RESET)"
	@git shortlog -sn | head -10
memory-test: ## Run memory-manager tests in release mode with output shown
	@echo "$(BLUE)Running memory usage analysis...$(RESET)"
	@cargo test test_memory_manager --release -- --nocapture
	@echo "$(GREEN)Memory tests complete!$(RESET)"
# Records a call-graph profile of the binary's --help run into perf.data.
perf-profile: ## Profile the release binary with Linux perf
	@echo "$(BLUE)Running performance profiling...$(RESET)"
	@echo "$(YELLOW)Note: This requires 'perf' tools to be installed$(RESET)"
	@cargo build --release
	@perf record -g target/release/$(PROJECT_NAME) --help 2>/dev/null || echo "$(RED)perf not available$(RESET)"
flamegraph: ## Generate a flamegraph (needs cargo-flamegraph)
	@echo "$(BLUE)Generating flamegraph...$(RESET)"
	@cargo flamegraph --bin $(PROJECT_NAME) || echo "$(YELLOW)Install cargo-flamegraph: cargo install flamegraph$(RESET)"
# cargo test accepts only ONE positional TESTNAME filter; the original
# `cargo test graph:: node:: relationship:: storage::` fails with
# "unexpected argument".  Extra filters must be passed to the libtest
# harness after `--` (multiple harness filters are supported since Rust 1.51).
db-test: ## Run graph/node/relationship/storage test modules
	@echo "$(BLUE)Running database tests...$(RESET)"
	@cargo test -- graph:: node:: relationship:: storage::
	@echo "$(GREEN)Database tests passed!$(RESET)"
gql-test: ## Run GQL module tests
	@echo "$(BLUE)Running GQL tests...$(RESET)"
	@cargo test gql::
	@echo "$(GREEN)GQL tests passed!$(RESET)"
all: build test doc examples ## Build, test, document and compile examples
full-check: format lint check test bench doc audit ## Exhaustive local verification
fresh: clean setup build test ## Clean rebuild with a refreshed toolchain
build-unsafe: ## Build with the unsafe-allocators feature
	@echo "$(BLUE)Building with unsafe allocators...$(RESET)"
	@cargo build --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators build complete!$(RESET)"
test-unsafe: ## Test with the unsafe-allocators feature
	@echo "$(BLUE)Testing with unsafe allocators...$(RESET)"
	@cargo test --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators tests passed!$(RESET)"
bench-unsafe: ## Benchmark with the unsafe-allocators feature
	@echo "$(BLUE)Benchmarking with unsafe allocators...$(RESET)"
	@cargo bench --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators benchmarks complete!$(RESET)"
# Location of the standalone error-audit tooling crate and where its
# reports are written.
ERROR_TOOLS_DIR := tools
ERROR_OUTPUT_DIR := target/error_audit
error-setup: ## Prepare components and output dir for the error-handling audit
	@echo "$(BLUE)Setting up error handling tools...$(RESET)"
	@rustup component add clippy rustfmt
	@mkdir -p $(ERROR_OUTPUT_DIR)
	@echo "$(GREEN)Error handling tools ready!$(RESET)"
# Uses a prebuilt release binary of the audit CLI when available; otherwise
# builds it first, then runs it via `cargo run`.  Paths are ../-relative
# because the tool runs from inside $(ERROR_TOOLS_DIR).
error-audit: error-setup ## Audit all unwrap() calls (reports in target/error_audit)
	@echo "$(BLUE)Running comprehensive error handling audit...$(RESET)"
	@echo "This will analyze all unwrap() calls in the codebase"
	@echo ""
	@if [ -f "$(ERROR_TOOLS_DIR)/target/release/error_audit_cli" ]; then \
		(cd $(ERROR_TOOLS_DIR) && ./target/release/error_audit_cli audit --path ../src --output ../$(ERROR_OUTPUT_DIR) --verbose); \
	else \
		echo "$(YELLOW)Building error audit tool first...$(RESET)"; \
		cargo build --manifest-path $(ERROR_TOOLS_DIR)/Cargo.toml --release --bin error_audit_cli; \
		(cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- audit --path ../src --output ../$(ERROR_OUTPUT_DIR) --verbose); \
	fi
	@echo ""
	@echo "$(GREEN)Audit complete! Check $(ERROR_OUTPUT_DIR) for detailed reports.$(RESET)"
# Gate target: fails (exit 1) if any .unwrap() remains in production code.
# Directories named `tests` are excluded; #[cfg(test)] modules inside source
# files are NOT excluded by this grep.
error-check: ## Fail if any unwrap() call exists in production code
	@echo "$(BLUE)Checking for unwrap() calls in production code...$(RESET)"
	@if grep -r "\.unwrap()" src/ --include="*.rs" --exclude-dir=tests; then \
		echo "$(RED)❌ Production unwrap() calls found!$(RESET)"; \
		echo "$(YELLOW)Run 'make error-audit' for detailed analysis$(RESET)"; \
		exit 1; \
	else \
		echo "$(GREEN)✅ No production unwrap() calls found$(RESET)"; \
	fi
# Tabulates unwrap()/expect()/unwrap_or* usage per module.  Counts are grep
# LINE counts, so two calls on one source line count once.  The module column
# was originally %-6s, which cannot hold names like "transaction" (11 chars),
# and the header widths did not match the printf widths ("unwrap_or*" is 10
# chars vs %9s) — both fixed so the table actually lines up.
error-count: ## Count unwrap()/expect()/unwrap_or* occurrences by module
	@echo "$(BLUE)Counting unwrap() occurrences by module...$(RESET)"
	@echo "Module      | Direct unwrap() | expect() | unwrap_or* | Total"
	@echo "------------|-----------------|----------|------------|------"
	@for module in storage memory transaction query gql graph index; do \
		if [ -d "src/$$module" ]; then \
			direct=$$(grep -r "\.unwrap()" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			expect=$$(grep -r "\.expect(" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			unwrap_or=$$(grep -r "\.unwrap_or" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			total=$$(($$direct + $$expect + $$unwrap_or)); \
			printf "%-11s | %15s | %8s | %10s | %5s\n" $$module $$direct $$expect $$unwrap_or $$total; \
		fi \
	done
	@echo ""
	@echo "$(YELLOW)Total patterns across all modules:$(RESET)"
	@grep -r "\.unwrap()" src/ --include="*.rs" | wc -l | xargs echo " Direct unwrap() calls:"
	@grep -r "\.expect(" src/ --include="*.rs" | wc -l | xargs echo " expect() calls:"
	@grep -r "\.unwrap_or" src/ --include="*.rs" | wc -l | xargs echo " unwrap_or* calls:"
# Usage: make error-analyze FILE=src/path/to/file.rs
# The path is ../-prefixed because the CLI runs from inside $(ERROR_TOOLS_DIR).
error-analyze: ## Analyze one file: make error-analyze FILE=src/foo.rs
	@echo "$(BLUE)Analyzing specific files...$(RESET)"
	@if [ -z "$(FILE)" ]; then \
		echo "$(RED)ERROR: Please specify FILE=path/to/file.rs$(RESET)"; \
		exit 1; \
	fi
	@(cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- analyze ../$(FILE) --detailed --suggestions)
# Usage: make error-refactor-dry [TASK=<task-id>]
# Without TASK it previews all critical-risk refactorings; nothing is modified.
error-refactor-dry: ## Preview refactoring changes without applying them
	@echo "$(BLUE)Dry run: showing potential refactoring changes...$(RESET)"
	@if [ -z "$(TASK)" ]; then \
		echo "$(YELLOW)No specific task provided, showing all critical issues$(RESET)"; \
		cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- refactor --risk critical --dry-run; \
	else \
		cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- refactor --task $(TASK) --dry-run; \
	fi
# Compares the current state against the saved audit baseline and re-runs the
# unit tests; requires `make error-audit` to have produced the JSON results.
error-validate: ## Validate refactoring results against the last audit run
	@echo "$(BLUE)Validating refactoring results...$(RESET)"
	@if [ ! -f "$(ERROR_OUTPUT_DIR)/error_audit_results.json" ]; then \
		echo "$(RED)ERROR: No audit results found. Run 'make error-audit' first.$(RESET)"; \
		exit 1; \
	fi
	@cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- validate \
		--before ../$(ERROR_OUTPUT_DIR)/error_audit_results.json \
		--test-cmd "make test-unit"
clean-error-tools: ## Remove audit reports and the tooling crate's build output
	@echo "$(BLUE)Cleaning error handling artifacts...$(RESET)"
	@rm -rf $(ERROR_OUTPUT_DIR)
	@cd $(ERROR_TOOLS_DIR) && cargo clean
	@echo "$(GREEN)Error handling artifacts cleaned$(RESET)"