graph_d 1.3.1

A native graph database implementation in Rust with built-in JSON support and SQLite-like simplicity.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
# Graph_D Native Graph Database - Development Makefile
# =====================================================
# Comprehensive build, test, benchmark, and development workflow commands

.DEFAULT_GOAL := help

# Remove a half-written target when its recipe fails, so a failed build can
# never leave a corrupt file that looks "up to date".
.DELETE_ON_ERROR:

# Every command-style target in this file is phony: none of them produce a
# file with the target's name.  Declare them all, otherwise a stray file in
# the repo named e.g. "clean" or "test" would silently disable that rule.
.PHONY: help info setup install build build-release build-all check \
        test test-unit test-integration test-memory test-coverage test-watch \
        bench bench-basic bench-scalability bench-memory bench-advanced \
        bench-regression perf-report format format-check lint lint-fix \
        audit security outdated doc doc-open doc-test examples \
        run-memory-example dev ci pre-commit quick-check release \
        release-check package clean deps size git-status commit-stats \
        memory-test perf-profile flamegraph db-test gql-test \
        all full-check fresh build-unsafe test-unsafe bench-unsafe \
        error-setup error-audit error-check error-count error-analyze \
        error-refactor-dry error-validate clean-error-tools

# ANSI color escape sequences for recipe output.
# NOTE(review): these rely on the shell's `echo` interpreting backslash
# escapes (true for dash's builtin echo, not for bash's) — confirm on the
# target platforms or switch to printf if colors render literally.
RED    := \033[31m
GREEN  := \033[32m
YELLOW := \033[33m
BLUE   := \033[34m
RESET  := \033[0m

# Project metadata
PROJECT_NAME := graph_d
# Version string scraped from Cargo.toml; ':=' expands the $(shell ...) once
# at parse time instead of re-running grep on every reference.
VERSION := $(shell grep '^version' Cargo.toml | sed 's/.*"\(.*\)".*/\1/')
TARGET_DIR := target

## Help and Information
##===========================================

# Self-documenting help: scans all parsed makefiles for targets annotated
# with a trailing "## description" comment and prints a two-column listing.
# The ERE here avoids the non-POSIX lazy quantifier ".*?" (an error on
# mawk/BSD awk); plain ".*" matches the same set of lines.
help: ## Show this help message
	@echo "$(BLUE)Graph_D Native Graph Database - Development Commands$(RESET)"
	@echo "===================================================="
	@echo ""
	@echo "$(GREEN)Available targets:$(RESET)"
	@awk 'BEGIN {FS = ":.*##"} /^[a-zA-Z_-]+:.*##/ { printf "  $(BLUE)%-20s$(RESET) %s\n", $$1, $$2 }' $(MAKEFILE_LIST)
	@echo ""
	@echo "$(YELLOW)Project Info:$(RESET)"
	@echo "  Name: $(PROJECT_NAME)"
	@echo "  Version: $(VERSION)"
	@echo "  Target: $(TARGET_DIR)"
	@echo ""

# Quick project snapshot: toolchain versions, git status, and the first
# level of the dependency tree.  $$(...) reaches the shell as $(...).
info: ## Display project information and status
	@echo "$(BLUE)Project Information$(RESET)"
	@echo "=================="
	@echo "Name: $(PROJECT_NAME)"
	@echo "Version: $(VERSION)"
	@echo "Rust Version: $$(rustc --version)"
	@echo "Cargo Version: $$(cargo --version)"
	@echo ""
	@echo "$(BLUE)Git Status$(RESET)"
	@echo "=========="
	@git status --short || echo "Not a git repository"
	@echo ""
	@echo "$(BLUE)Dependencies$(RESET)"
	@echo "============"
	@cargo tree --depth 1 | head -10

## Development Setup
##===========================================

# One-time environment bootstrap: updates the stable toolchain, adds the
# lint/format components, and installs the cargo extensions used by the
# test-coverage and audit targets.  (test-watch additionally needs
# cargo-watch, which is not installed here.)
setup: ## Set up development environment
	@echo "$(BLUE)Setting up development environment...$(RESET)"
	@rustup update stable
	@rustup component add clippy rustfmt
	@cargo install cargo-audit cargo-tarpaulin
	@echo "$(GREEN)Development environment ready!$(RESET)"

# Installs the crate's binary from this checkout into cargo's bin dir.
install: ## Install the graph database binary
	@echo "$(BLUE)Installing $(PROJECT_NAME)...$(RESET)"
	@cargo install --path .
	@echo "$(GREEN)Installation complete!$(RESET)"

## Build Commands
##===========================================

build: ## Build the project in debug mode
	@echo "$(BLUE)Building $(PROJECT_NAME) in debug mode...$(RESET)"
	@cargo build
	@echo "$(GREEN)Debug build complete!$(RESET)"

build-release: ## Build the project in release mode
	@echo "$(BLUE)Building $(PROJECT_NAME) in release mode...$(RESET)"
	@cargo build --release
	@echo "$(GREEN)Release build complete!$(RESET)"

# --all-targets compiles lib, bins, examples, tests, and benches in one pass.
build-all: ## Build all targets (lib, bin, examples, tests, benches)
	@echo "$(BLUE)Building all targets...$(RESET)"
	@cargo build --all-targets
	@echo "$(GREEN)All targets built successfully!$(RESET)"

# Type-checks without codegen; the extra invocations cover test, bench, and
# example targets that a bare `cargo check` skips.
check: ## Run cargo check for fast compilation checking
	@echo "$(BLUE)Checking code compilation...$(RESET)"
	@cargo check
	@cargo check --tests
	@cargo check --benches
	@cargo check --examples
	@echo "$(GREEN)Code check passed!$(RESET)"

## Testing Commands
##===========================================

test: ## Run all tests
	@echo "$(BLUE)Running all tests...$(RESET)"
	@cargo test
	@echo "$(GREEN)All tests passed!$(RESET)"

# --lib restricts the run to the library crate's unit tests.
test-unit: ## Run unit tests only
	@echo "$(BLUE)Running unit tests...$(RESET)"
	@cargo test --lib
	@echo "$(GREEN)Unit tests passed!$(RESET)"

# --test '*' runs every integration test under tests/ (quoting keeps the
# glob away from the shell so cargo receives the literal '*').
test-integration: ## Run integration tests only
	@echo "$(BLUE)Running integration tests...$(RESET)"
	@cargo test --test '*'
	@echo "$(GREEN)Integration tests passed!$(RESET)"

# Re-runs the suite with the unsafe-allocators cargo feature enabled.
test-memory: ## Run tests with memory management features
	@echo "$(BLUE)Running tests with memory management...$(RESET)"
	@cargo test --features unsafe-allocators
	@echo "$(GREEN)Memory management tests passed!$(RESET)"

# Requires cargo-tarpaulin (installed by `make setup`).
test-coverage: ## Generate test coverage report
	@echo "$(BLUE)Generating test coverage report...$(RESET)"
	@cargo tarpaulin --out Html --output-dir target/coverage
	@echo "$(GREEN)Coverage report generated in target/coverage/tarpaulin-report.html$(RESET)"

# Requires cargo-watch; runs until interrupted.
test-watch: ## Run tests in watch mode (requires cargo-watch)
	@echo "$(BLUE)Running tests in watch mode...$(RESET)"
	@cargo watch -x test

## Benchmarking and Performance
##===========================================

bench: ## Run all benchmarks
	@echo "$(BLUE)Running all benchmarks...$(RESET)"
	@cargo bench
	@echo "$(GREEN)Benchmarks complete! Check target/criterion for detailed reports.$(RESET)"

# Arguments after `--` are bench-name filters passed to the harness; each
# target below runs one named benchmark group.
bench-basic: ## Run basic performance benchmarks
	@echo "$(BLUE)Running basic benchmarks...$(RESET)"
	@cargo bench -- basic_benches
	@echo "$(GREEN)Basic benchmarks complete!$(RESET)"

bench-scalability: ## Run scalability benchmarks
	@echo "$(BLUE)Running scalability benchmarks...$(RESET)"
	@cargo bench -- scalability_benches
	@echo "$(GREEN)Scalability benchmarks complete!$(RESET)"

bench-memory: ## Run memory management benchmarks
	@echo "$(BLUE)Running memory management benchmarks...$(RESET)"
	@cargo bench -- memory_benches
	@echo "$(GREEN)Memory benchmarks complete!$(RESET)"

bench-advanced: ## Run advanced performance benchmarks (including 1M nodes)
	@echo "$(BLUE)Running advanced benchmarks (this may take a while)...$(RESET)"
	@cargo bench -- advanced_benches
	@echo "$(GREEN)Advanced benchmarks complete!$(RESET)"

bench-regression: ## Run performance regression detection
	@echo "$(BLUE)Running performance regression detection...$(RESET)"
	@cargo bench -- bench_performance_regression_detection
	@echo "$(GREEN)Regression detection complete!$(RESET)"

# Builds PERFORMANCE_REPORT.md in the repo root from the timing lines of two
# bench runs.  `|| true` keeps the recipe alive when grep finds no matches
# (grep exits 1 on no match, which would otherwise abort the rule).
perf-report: ## Generate performance analysis report
	@echo "$(BLUE)Generating performance analysis report...$(RESET)"
	@echo "# Performance Report - Generated $$(date)" > PERFORMANCE_REPORT.md
	@echo "" >> PERFORMANCE_REPORT.md
	@echo "## Basic Benchmarks" >> PERFORMANCE_REPORT.md
	@cargo bench -- basic_benches 2>&1 | grep -E "(time:|throughput:)" | head -10 >> PERFORMANCE_REPORT.md || true
	@echo "" >> PERFORMANCE_REPORT.md
	@echo "## Memory Benchmarks" >> PERFORMANCE_REPORT.md
	@cargo bench -- memory_benches 2>&1 | grep -E "(time:|throughput:)" | head -10 >> PERFORMANCE_REPORT.md || true
	@echo "$(GREEN)Performance report generated: PERFORMANCE_REPORT.md$(RESET)"

## Code Quality and Linting
##===========================================

format: ## Format code using rustfmt
	@echo "$(BLUE)Formatting code...$(RESET)"
	@cargo fmt
	@echo "$(GREEN)Code formatted!$(RESET)"

# --check makes rustfmt exit non-zero on any diff; used by ci/release-check.
format-check: ## Check if code is properly formatted
	@echo "$(BLUE)Checking code formatting...$(RESET)"
	@cargo fmt -- --check
	@echo "$(GREEN)Code formatting is correct!$(RESET)"

# -D warnings promotes every clippy warning to a hard error.
lint: ## Run clippy for linting
	@echo "$(BLUE)Running clippy lints...$(RESET)"
	@cargo clippy -- -D warnings
	@echo "$(GREEN)Linting passed!$(RESET)"

# --allow-dirty/--allow-staged let clippy rewrite files even with
# uncommitted changes in the working tree.
lint-fix: ## Automatically fix clippy warnings where possible
	@echo "$(BLUE)Fixing clippy warnings...$(RESET)"
	@cargo clippy --fix --allow-dirty --allow-staged
	@echo "$(GREEN)Auto-fixes applied!$(RESET)"

## Security and Auditing
##===========================================

# Requires cargo-audit (installed by `make setup`).
audit: ## Run security audit
	@echo "$(BLUE)Running security audit...$(RESET)"
	@cargo audit
	@echo "$(GREEN)Security audit complete!$(RESET)"

security: audit ## Alias for audit

# Soft dependency: falls back to an install hint if cargo-outdated is absent.
outdated: ## Check for outdated dependencies
	@echo "$(BLUE)Checking for outdated dependencies...$(RESET)"
	@cargo outdated || echo "$(YELLOW)Install cargo-outdated: cargo install cargo-outdated$(RESET)"

## Documentation
##===========================================

# --no-deps skips dependency docs; --document-private-items includes
# non-public APIs for internal reference.
doc: ## Generate documentation
	@echo "$(BLUE)Generating documentation...$(RESET)"
	@cargo doc --no-deps --document-private-items
	@echo "$(GREEN)Documentation generated in target/doc/$(RESET)"

doc-open: ## Generate and open documentation in browser
	@echo "$(BLUE)Generating and opening documentation...$(RESET)"
	@cargo doc --no-deps --document-private-items --open

# Compiles and runs the code examples embedded in doc comments.
doc-test: ## Run documentation tests
	@echo "$(BLUE)Running documentation tests...$(RESET)"
	@cargo test --doc
	@echo "$(GREEN)Documentation tests passed!$(RESET)"

## Examples and Demos
##===========================================

examples: ## Build all examples
	@echo "$(BLUE)Building examples...$(RESET)"
	@cargo build --examples
	@echo "$(GREEN)Examples built successfully!$(RESET)"

# Runs examples/memory_management.rs as a demo.
run-memory-example: ## Run the memory management example
	@echo "$(BLUE)Running memory management example...$(RESET)"
	@cargo run --example memory_management
	@echo "$(GREEN)Memory management example completed!$(RESET)"

## Development Workflow
##===========================================

# Aggregate target: prerequisites run left to right (serially; under -j the
# order is not guaranteed, but each step is independent enough in practice).
dev: format lint check test ## Run full development check (format, lint, check, test)

# Mirrors the CI pipeline locally.  $(MAKE) (not bare `make`) is used so
# flags like -n and the jobserver propagate to the sub-invocations.
ci: ## Run CI pipeline locally
	@echo "$(BLUE)Running CI pipeline...$(RESET)"
	@$(MAKE) format-check
	@$(MAKE) lint
	@$(MAKE) check
	@$(MAKE) test
	@$(MAKE) bench-basic
	@echo "$(GREEN)CI pipeline completed successfully!$(RESET)"

# Like ci but auto-formats (instead of checking) and runs only unit tests.
pre-commit: ## Run pre-commit checks
	@echo "$(BLUE)Running pre-commit checks...$(RESET)"
	@$(MAKE) format
	@$(MAKE) lint
	@$(MAKE) check
	@$(MAKE) test-unit
	@echo "$(GREEN)Pre-commit checks passed!$(RESET)"

# Fast inner-loop check: invokes cargo directly to skip make overhead.
quick-check: ## Quick development check (format, check, unit tests)
	@echo "$(BLUE)Running quick check...$(RESET)"
	@cargo fmt
	@cargo check
	@cargo test --lib
	@echo "$(GREEN)Quick check completed!$(RESET)"

## Release and Deployment
##===========================================

# Builds the release binary and strips symbols.  `|| true` keeps the rule
# from failing on platforms without `strip` or when the binary name differs.
release: ## Build optimized release version
	@echo "$(BLUE)Building optimized release...$(RESET)"
	@cargo build --release
	@strip target/release/$(PROJECT_NAME) 2>/dev/null || true
	@echo "$(GREEN)Release build complete! Binary: target/release/$(PROJECT_NAME)$(RESET)"

# Full pre-release gate: formatting, lints, tests, a smoke benchmark,
# security audit, then the release build itself.
release-check: ## Run all checks before release
	@echo "$(BLUE)Running pre-release checks...$(RESET)"
	@$(MAKE) format-check
	@$(MAKE) lint
	@$(MAKE) test
	@$(MAKE) bench-basic
	@$(MAKE) audit
	@$(MAKE) build-release
	@echo "$(GREEN)Release checks passed!$(RESET)"

# Creates the distributable .crate file for crates.io publication.
package: ## Create release package
	@echo "$(BLUE)Creating release package...$(RESET)"
	@cargo package
	@echo "$(GREEN)Package created in target/package/$(RESET)"

## Utility Commands
##===========================================

# Removes cargo's target dir plus the report artifacts this Makefile
# generates (coverage, criterion output, PERFORMANCE_REPORT.md).
clean: ## Clean build artifacts
	@echo "$(BLUE)Cleaning build artifacts...$(RESET)"
	@cargo clean
	@rm -rf target/coverage target/criterion PERFORMANCE_REPORT.md || true
	@echo "$(GREEN)Clean complete!$(RESET)"

deps: ## Show dependency tree
	@echo "$(BLUE)Dependency tree:$(RESET)"
	@cargo tree

# Informational only; falls back to a hint when a binary hasn't been built.
size: ## Show binary size information
	@echo "$(BLUE)Binary size information:$(RESET)"
	@ls -lh target/release/$(PROJECT_NAME) 2>/dev/null || echo "Release binary not found. Run 'make release' first."
	@ls -lh target/debug/$(PROJECT_NAME) 2>/dev/null || echo "Debug binary not found. Run 'make build' first."

## Git and Version Control
##===========================================

git-status: ## Show git status and recent commits
	@echo "$(BLUE)Git Status:$(RESET)"
	@git status --short
	@echo ""
	@echo "$(BLUE)Recent commits:$(RESET)"
	@git log --oneline -10

# Top ten contributors by commit count.
commit-stats: ## Show commit statistics
	@echo "$(BLUE)Commit Statistics:$(RESET)"
	@git shortlog -sn | head -10

## Memory and Performance Analysis
##===========================================

# Runs only tests matching "test_memory_manager"; --nocapture shows their
# stdout so memory statistics are visible.
memory-test: ## Run memory usage tests
	@echo "$(BLUE)Running memory usage analysis...$(RESET)"
	@cargo test test_memory_manager --release -- --nocapture
	@echo "$(GREEN)Memory tests complete!$(RESET)"

# Best effort: degrades to a message when Linux `perf` is not installed.
perf-profile: ## Run performance profiling (requires perf tools)
	@echo "$(BLUE)Running performance profiling...$(RESET)"
	@echo "$(YELLOW)Note: This requires 'perf' tools to be installed$(RESET)"
	@cargo build --release
	@perf record -g target/release/$(PROJECT_NAME) --help 2>/dev/null || echo "$(RED)perf not available$(RESET)"

# Soft dependency on cargo-flamegraph; prints an install hint if missing.
flamegraph: ## Generate flamegraph (requires cargo-flamegraph)
	@echo "$(BLUE)Generating flamegraph...$(RESET)"
	@cargo flamegraph --bin $(PROJECT_NAME) || echo "$(YELLOW)Install cargo-flamegraph: cargo install flamegraph$(RESET)"

## Database Operations
##===========================================

# Runs the test groups for each core database module.  `cargo test` accepts
# at most ONE positional filter, so the module filters are looped one at a
# time (the previous single-invocation form made cargo reject the extra
# arguments).  `|| exit 1` propagates the first failure out of the loop.
db-test: ## Run database-specific tests
	@echo "$(BLUE)Running database tests...$(RESET)"
	@for filter in graph:: node:: relationship:: storage::; do \
		cargo test "$$filter" || exit 1; \
	done
	@echo "$(GREEN)Database tests passed!$(RESET)"

# Runs only tests whose path matches the gql:: module prefix.
gql-test: ## Run GQL-specific tests
	@echo "$(BLUE)Running GQL tests...$(RESET)"
	@cargo test gql::
	@echo "$(GREEN)GQL tests passed!$(RESET)"

## All-in-one commands
##===========================================

# Aggregate targets: no recipes of their own, only prerequisite chains.
all: build test doc examples ## Build everything

full-check: format lint check test bench doc audit ## Run comprehensive checks

fresh: clean setup build test ## Fresh start: clean, setup, build, test

## Feature-specific builds
##===========================================

# build/test/bench with the crate's "unsafe-allocators" cargo feature on.
build-unsafe: ## Build with unsafe allocators feature
	@echo "$(BLUE)Building with unsafe allocators...$(RESET)"
	@cargo build --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators build complete!$(RESET)"

test-unsafe: ## Test with unsafe allocators feature
	@echo "$(BLUE)Testing with unsafe allocators...$(RESET)"
	@cargo test --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators tests passed!$(RESET)"

bench-unsafe: ## Benchmark with unsafe allocators feature
	@echo "$(BLUE)Benchmarking with unsafe allocators...$(RESET)"
	@cargo bench --features unsafe-allocators
	@echo "$(GREEN)Unsafe allocators benchmarks complete!$(RESET)"

## Error Handling Enhancement Tools
##===========================================

# Configuration for error handling tools.
# ERROR_TOOLS_DIR holds a separate cargo workspace with the audit CLI;
# ERROR_OUTPUT_DIR receives the generated reports.
ERROR_TOOLS_DIR := tools
ERROR_OUTPUT_DIR := target/error_audit

error-setup: ## Set up error handling tools and dependencies
	@echo "$(BLUE)Setting up error handling tools...$(RESET)"
	@rustup component add clippy rustfmt
	@mkdir -p $(ERROR_OUTPUT_DIR)
	@echo "$(GREEN)Error handling tools ready!$(RESET)"

# Uses a prebuilt error_audit_cli binary when present; otherwise builds it
# first and runs via `cargo run`.  Paths are ../-relative because the tool
# runs from inside $(ERROR_TOOLS_DIR) (the subshell parens keep the cd from
# leaking — each make recipe line is its own shell anyway).
error-audit: error-setup ## Run comprehensive error handling audit
	@echo "$(BLUE)Running comprehensive error handling audit...$(RESET)"
	@echo "This will analyze all unwrap() calls in the codebase"
	@echo ""
	@if [ -f "$(ERROR_TOOLS_DIR)/target/release/error_audit_cli" ]; then \
		(cd $(ERROR_TOOLS_DIR) && ./target/release/error_audit_cli audit --path ../src --output ../$(ERROR_OUTPUT_DIR) --verbose); \
	else \
		echo "$(YELLOW)Building error audit tool first...$(RESET)"; \
		cargo build --manifest-path $(ERROR_TOOLS_DIR)/Cargo.toml --release --bin error_audit_cli; \
		(cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- audit --path ../src --output ../$(ERROR_OUTPUT_DIR) --verbose); \
	fi
	@echo ""
	@echo "$(GREEN)Audit complete! Check $(ERROR_OUTPUT_DIR) for detailed reports.$(RESET)"

# CI gate: exits 1 if any ".unwrap()" appears under src/.  This is a plain
# textual grep, so matches inside comments or strings also count;
# --exclude-dir only skips directories literally named "tests".
error-check: ## Check for unwrap() in production code (CI-friendly)
	@echo "$(BLUE)Checking for unwrap() calls in production code...$(RESET)"
	@if grep -r "\.unwrap()" src/ --include="*.rs" --exclude-dir=tests; then \
		echo "$(RED)❌ Production unwrap() calls found!$(RESET)"; \
		echo "$(YELLOW)Run 'make error-audit' for detailed analysis$(RESET)"; \
		exit 1; \
	else \
		echo "$(GREEN)✅ No production unwrap() calls found$(RESET)"; \
	fi

# Per-module tally of unwrap-family patterns.  $$ reaches the shell as a
# single $; counts are raw grep line counts (not semantic call sites), and
# modules absent from src/ are skipped by the -d test.
error-count: ## Count unwrap occurrences by module
	@echo "$(BLUE)Counting unwrap() occurrences by module...$(RESET)"
	@echo "Module | Direct unwrap() | expect() | unwrap_or* | Total"
	@echo "-------|-----------------|----------|-----------|------"
	@for module in storage memory transaction query gql graph index; do \
		if [ -d "src/$$module" ]; then \
			direct=$$(grep -r "\.unwrap()" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			expect=$$(grep -r "\.expect(" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			unwrap_or=$$(grep -r "\.unwrap_or" src/$$module/ --include="*.rs" | wc -l | tr -d ' '); \
			total=$$(($$direct + $$expect + $$unwrap_or)); \
			printf "%-6s | %15s | %8s | %9s | %5s\n" $$module $$direct $$expect $$unwrap_or $$total; \
		fi \
	done
	@echo ""
	@echo "$(YELLOW)Total patterns across all modules:$(RESET)"
	@grep -r "\.unwrap()" src/ --include="*.rs" | wc -l | xargs echo "  Direct unwrap() calls:"
	@grep -r "\.expect(" src/ --include="*.rs" | wc -l | xargs echo "  expect() calls:"
	@grep -r "\.unwrap_or" src/ --include="*.rs" | wc -l | xargs echo "  unwrap_or* calls:"

# Analyzes one file; requires FILE=... on the command line
# (e.g. `make error-analyze FILE=src/storage/mod.rs`).  The ../ prefix
# compensates for running inside $(ERROR_TOOLS_DIR).
error-analyze: ## Analyze specific files for unwrap patterns
	@echo "$(BLUE)Analyzing specific files...$(RESET)"
	@if [ -z "$(FILE)" ]; then \
		echo "$(RED)ERROR: Please specify FILE=path/to/file.rs$(RESET)"; \
		exit 1; \
	fi
	@(cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- analyze ../$(FILE) --detailed --suggestions)

# Dry run of the refactoring tool.  Optional TASK=... selects one task;
# otherwise all critical-risk issues are shown.  No files are modified.
error-refactor-dry: ## Show what would be refactored (dry run)
	@echo "$(BLUE)Dry run: showing potential refactoring changes...$(RESET)"
	@if [ -z "$(TASK)" ]; then \
		echo "$(YELLOW)No specific task provided, showing all critical issues$(RESET)"; \
		cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- refactor --risk critical --dry-run; \
	else \
		cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- refactor --task $(TASK) --dry-run; \
	fi

# Compares the current tree against the JSON baseline produced by
# `make error-audit`, re-running the unit tests as the validation command.
error-validate: ## Validate refactoring results
	@echo "$(BLUE)Validating refactoring results...$(RESET)"
	@if [ ! -f "$(ERROR_OUTPUT_DIR)/error_audit_results.json" ]; then \
		echo "$(RED)ERROR: No audit results found. Run 'make error-audit' first.$(RESET)"; \
		exit 1; \
	fi
	@cd $(ERROR_TOOLS_DIR) && cargo run --bin error_audit_cli -- validate \
		--before ../$(ERROR_OUTPUT_DIR)/error_audit_results.json \
		--test-cmd "make test-unit"

# Removes audit reports and the tool workspace's build artifacts.
clean-error-tools: ## Clean error handling tool artifacts
	@echo "$(BLUE)Cleaning error handling artifacts...$(RESET)"
	@rm -rf $(ERROR_OUTPUT_DIR)
	@cd $(ERROR_TOOLS_DIR) && cargo clean
	@echo "$(GREEN)Error handling artifacts cleaned$(RESET)"