# --- Toolchain and artifact configuration -------------------------------------
# All `?=` variables may be overridden from the command line or environment,
# e.g. `make viz GRAPH_FILE=path/to/graph.json`.
CARGO := cargo
DOT ?= dot
# Input graph and locations of generated artifacts.
GRAPH_FILE ?= examples/sample_graph.json
DOT_PATH ?= target/graph.dot
PNG_PATH ?= target/graph.png
ONNX_PATH ?= target/graph.onnx
COREML_PATH ?= target/graph.mlmodel
COREMLC_PATH ?= target/graph.mlmodelc
# Pinned ONNX Runtime release (macOS arm64 build; see onnxruntime-download).
ORT_VERSION ?= 1.23.2
ORT_BASE ?= https://github.com/microsoft/onnxruntime/releases/download/v$(ORT_VERSION)
ORT_TARBALL ?= onnxruntime-osx-arm64-$(ORT_VERSION).tgz
ORT_DIR ?= target/onnxruntime
ORT_LIB_DIR ?= $(ORT_DIR)/onnxruntime-osx-arm64-$(ORT_VERSION)/lib
# Consumed by the `ort` crate's build script when ORT_STRATEGY=system.
ORT_LIB_LOCATION ?= $(ORT_LIB_DIR)
# Declare every command-style target as phony so a stray file with the same
# name (e.g. `clean` or `test`) can never shadow it and make it look up to date.
# Previously missing: clean, onnxruntime-download, webnn-venv.
.PHONY: build test fmt run viz onnx coreml coreml-validate onnx-validate validate-all-env \
clean onnxruntime-download webnn-venv \
python-dev python-build python-test python-test-wpt python-test-wpt-onnx python-test-wpt-coreml \
python-perf python-perf-full python-clean python-example \
mobilenet-demo text-gen-demo text-gen-train text-gen-trained text-gen-enhanced text-gen-train-simple \
docs-serve docs-build docs-clean ci-docs \
help clean-all
# Remove Rust build artifacts and generated graph/conversion outputs.
# Uses the *_PATH variables (instead of hard-coded target/graph.* paths) so
# that overridden output locations are cleaned as well; defaults are identical.
clean:
	$(CARGO) clean
	rm -f $(DOT_PATH) $(PNG_PATH) $(ONNX_PATH) $(COREML_PATH)
	rm -rf $(COREMLC_PATH)
	rm -rf $(ORT_DIR)
# Compile the Rust crate (debug profile).
build:
	$(CARGO) build
# Lint first (clippy with warnings promoted to errors), then run the Rust
# test suite. A clippy failure stops the recipe before tests run.
test:
	@echo "Running clippy..."
	$(CARGO) clippy --all-targets -- -D warnings
	@echo "Running tests..."
	$(CARGO) test
# Format Rust sources in place with rustfmt.
fmt:
	$(CARGO) fmt
# Run the CLI against GRAPH_FILE (defaults to the bundled sample graph).
# Previously hard-coded examples/sample_graph.json, which silently ignored a
# GRAPH_FILE override; now consistent with viz/onnx/coreml.
run:
	$(CARGO) run -- $(GRAPH_FILE)
# Export GRAPH_FILE to Graphviz DOT, rasterize it to PNG, and open the image.
# NOTE(review): `open` is macOS-specific; consistent with the osx-arm64
# ORT_TARBALL above, this Makefile appears to target macOS only -- confirm.
viz:
	$(CARGO) run -- $(GRAPH_FILE) --export-dot $(DOT_PATH)
	$(DOT) -Tpng $(DOT_PATH) -o $(PNG_PATH)
	open $(PNG_PATH)
# Fetch and unpack the pinned ONNX Runtime release into ORT_DIR.
# Idempotent: skipped when the extracted lib directory already exists.
# Fixes: `curl -f` makes HTTP errors (404/5xx) fail the download instead of
# saving the error page as the tarball; `&&` chaining stops the recipe at the
# first failing step instead of running tar on a corrupt/missing file.
onnxruntime-download:
	@if [ -d "$(ORT_LIB_DIR)" ]; then \
		echo "ONNX Runtime already downloaded at $(ORT_LIB_DIR)"; \
	else \
		echo "Downloading ONNX Runtime $(ORT_VERSION)..."; \
		mkdir -p $(ORT_DIR) && \
		curl -fL $(ORT_BASE)/$(ORT_TARBALL) -o $(ORT_DIR)/$(ORT_TARBALL) && \
		tar -xzf $(ORT_DIR)/$(ORT_TARBALL) -C $(ORT_DIR) && \
		echo "[OK] ONNX Runtime downloaded and extracted"; \
	fi
# Convert GRAPH_FILE to ONNX using the locally downloaded runtime.
# Env vars wire the `ort` crate to it: ORT_STRATEGY=system + ORT_LIB_LOCATION
# select the system libs at build time, RUSTFLAGS adds the link search path,
# and DYLD_LIBRARY_PATH lets the produced binary find the dylib at run time.
onnx: onnxruntime-download
	ORT_STRATEGY=system ORT_LIB_LOCATION=$(ORT_LIB_LOCATION) DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) RUSTFLAGS="-L $(ORT_LIB_DIR)" $(CARGO) run --features onnx-runtime -- $(GRAPH_FILE) --convert onnx --convert-output $(ONNX_PATH)
	@echo "ONNX graph written to $(ONNX_PATH)"
# Re-run the ONNX conversion with --run-onnx to execute the exported graph
# through ONNX Runtime as a validation pass (same env wiring as `onnx`).
onnx-validate: onnx
	ORT_STRATEGY=system ORT_LIB_LOCATION=$(ORT_LIB_LOCATION) DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) RUSTFLAGS="-L $(ORT_LIB_DIR)" $(CARGO) run --features onnx-runtime -- $(GRAPH_FILE) --convert onnx --convert-output $(ONNX_PATH) --run-onnx
# Convert GRAPH_FILE to a CoreML .mlmodel (no runtime features needed for
# conversion alone).
coreml:
	$(CARGO) run -- $(GRAPH_FILE) --convert coreml --convert-output $(COREML_PATH)
	@echo "CoreML graph written to $(COREML_PATH)"
# Convert and then execute the CoreML model (--run-coreml), also emitting the
# compiled .mlmodelc bundle used by the CoreML runtime.
coreml-validate: coreml
	$(CARGO) run --features coreml-runtime -- $(GRAPH_FILE) --convert coreml --convert-output $(COREML_PATH) --run-coreml --coreml-compiled-output $(COREMLC_PATH)
# Create a Python venv with the onnx2webnn conversion tool installed from git.
# NOTE(review): uses python3 while python-dev creates the same .venv-webnn with
# python3.12 -- confirm which interpreter is intended for this shared venv.
webnn-venv:
	python3 -m venv .venv-webnn
	. .venv-webnn/bin/activate && pip install --upgrade pip && pip install git+https://github.com/huningxin/onnx2webnn.git
# Full pipeline: build, lint+test, then convert and validate both ONNX and
# CoreML exports. Prerequisites run left to right (serially without -j).
validate-all-env: build test onnx-validate coreml-validate
	@echo "Full pipeline (build/test/convert/validate) completed."
# Build and install the Python extension into .venv-webnn in editable mode with
# both native backends enabled; creates the venv on first run.
# Fix: use $(CURDIR) (make's built-in absolute cwd) instead of $(PWD), which is
# just an inherited shell env var and is wrong or unset under `make -C` or a
# scrubbed environment.
# NOTE(review): the venv is created with python3.12 here but with python3 in
# `webnn-venv` -- confirm which interpreter is required.
python-dev: onnxruntime-download
	@echo "Installing Python package in development mode with all backends..."
	@if [ ! -d .venv-webnn ]; then \
		python3.12 -m venv .venv-webnn; \
		.venv-webnn/bin/pip install --upgrade pip; \
		.venv-webnn/bin/pip install pytest pytest-asyncio numpy maturin; \
	fi
	VIRTUAL_ENV=$(CURDIR)/.venv-webnn \
	PATH=$(CURDIR)/.venv-webnn/bin:$$PATH \
	ORT_STRATEGY=system \
	ORT_LIB_LOCATION=$(ORT_LIB_LOCATION) \
	DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) \
	RUSTFLAGS="-L $(ORT_LIB_DIR)" \
	.venv-webnn/bin/maturin develop --features python,onnx-runtime,coreml-runtime
# Build a release Python wheel with both native backends enabled.
# NOTE(review): bare `pip install maturin` targets whatever pip is first on
# PATH (possibly global/system) rather than .venv-webnn -- confirm intended.
python-build: onnxruntime-download
	@echo "Building Python wheel with all backends..."
	pip install maturin
	ORT_STRATEGY=system \
	ORT_LIB_LOCATION=$(ORT_LIB_LOCATION) \
	DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) \
	RUSTFLAGS="-L $(ORT_LIB_DIR)" \
	maturin build --features python,onnx-runtime,coreml-runtime --release
# Run the full Python test suite (includes WPT conformance tests), preferring
# the project venv's interpreter when present.
# Exit codes 134 (SIGABRT) and 139 (SIGSEGV) are deliberately tolerated: per
# the echoed messages, the interpreter can crash during cleanup AFTER all tests
# have passed, so those codes are remapped to success. Any other nonzero code
# is propagated unchanged.
python-test: python-dev
	@echo "Running Python tests (includes WPT conformance tests)..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/ -v; \
		EXIT_CODE=$$?; \
		if [ $$EXIT_CODE -eq 134 ] || [ $$EXIT_CODE -eq 139 ]; then \
			echo "[WARNING] Note: Python crashed during cleanup"; \
			echo "[OK] All tests passed successfully before the crash"; \
			exit 0; \
		else \
			exit $$EXIT_CODE; \
		fi; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/ -v; \
		EXIT_CODE=$$?; \
		if [ $$EXIT_CODE -eq 134 ] || [ $$EXIT_CODE -eq 139 ]; then \
			echo "[WARNING] Note: Python crashed during cleanup"; \
			echo "[OK] All tests passed successfully before the crash"; \
			exit 0; \
		else \
			exit $$EXIT_CODE; \
		fi; \
	fi
# Run only the WPT conformance tests, preferring the venv interpreter.
# DYLD_LIBRARY_PATH makes the ONNX Runtime dylib loadable by the extension.
python-test-wpt: python-dev
	@echo "Running WPT conformance tests only..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/test_wpt_conformance.py -v; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/test_wpt_conformance.py -v; \
	fi
# WPT conformance tests filtered to the ONNX backend (pytest -k "onnx").
python-test-wpt-onnx: python-dev
	@echo "Running WPT conformance tests - ONNX backend only..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/test_wpt_conformance.py -k "onnx" -v; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/test_wpt_conformance.py -k "onnx" -v; \
	fi
# WPT conformance tests filtered to the CoreML backend (pytest -k "coreml").
python-test-wpt-coreml: python-dev
	@echo "Running WPT conformance tests - CoreML backend only..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/test_wpt_conformance.py -k "coreml" -v; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/test_wpt_conformance.py -k "coreml" -v; \
	fi
# Quick benchmarks: tests marked `benchmark` but not `slow`. -s disables
# output capture so timing results print live.
python-perf: python-dev
	@echo "Running performance benchmarks (quick)..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/test_performance.py -m "benchmark and not slow" -v -s; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/test_performance.py -m "benchmark and not slow" -v -s; \
	fi
# Full benchmark suite: all tests marked `benchmark`, including slow ones.
python-perf-full: python-dev
	@echo "Running full performance benchmark suite..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python -m pytest tests/test_performance.py -m "benchmark" -v -s; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python -m pytest tests/test_performance.py -m "benchmark" -v -s; \
	fi
# Run the two bundled Python example scripts, preferring the venv interpreter.
python-example: python-dev
	@echo "Running Python examples..."
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/python_simple.py; \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/python_matmul.py; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/python_simple.py; \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/python_matmul.py; \
	fi
# Run the MobileNetV2 image-classification example against all three backends
# in turn (ONNX CPU, ONNX GPU, CoreML). Installs the demo's image/network
# dependencies first. Each @if picks the venv interpreter when available.
mobilenet-demo: python-dev
	@echo "Installing demo dependencies..."
	@if [ -f .venv-webnn/bin/python ]; then \
		.venv-webnn/bin/pip install -q Pillow requests; \
	else \
		pip install -q Pillow requests; \
	fi
	@echo "========================================================================"
	@echo "Running MobileNetV2 Image Classifier on All Backends"
	@echo "========================================================================"
	@echo ""
	@echo "Backend 1/3: ONNX CPU"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/mobilenetv2_complete.py examples/images/test.jpg --backend cpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/mobilenetv2_complete.py examples/images/test.jpg --backend cpu; \
	fi
	@echo ""
	@echo "Backend 2/3: ONNX GPU"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/mobilenetv2_complete.py examples/images/test.jpg --backend gpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/mobilenetv2_complete.py examples/images/test.jpg --backend gpu; \
	fi
	@echo ""
	@echo "Backend 3/3: CoreML (Neural Engine)"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/mobilenetv2_complete.py examples/images/test.jpg --backend coreml; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/mobilenetv2_complete.py examples/images/test.jpg --backend coreml; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "All three backends completed successfully!"
	@echo "========================================================================"
# Run the GPT-style text-generation example on all three backends with a
# fixed prompt/token budget/temperature so the runs are comparable.
text-gen-demo: python-dev
	@echo "========================================================================"
	@echo "Running Text Generation Demo on All Backends"
	@echo "========================================================================"
	@echo ""
	@echo "Backend 1/3: ONNX CPU"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend cpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend cpu; \
	fi
	@echo ""
	@echo "Backend 2/3: ONNX GPU"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend gpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend gpu; \
	fi
	@echo ""
	@echo "Backend 3/3: CoreML (Neural Engine)"
	@echo "------------------------------------------------------------------------"
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend coreml; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/text_generation_gpt.py --prompt "Hello world" --tokens 30 --temperature 0.8 --backend coreml; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "All three backends completed successfully!"
	@echo "========================================================================"
# Train the text-generation model on the bundled sample corpus and write the
# learned weights to trained_model.json (consumed by text-gen-trained).
text-gen-train: python-dev
	@echo "========================================================================"
	@echo "Training Text Generation Model"
	@echo "========================================================================"
	@echo ""
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/train_text_model.py --data examples/sample_text.txt --epochs 10 --batch-size 32 --lr 0.05 --save trained_model.json; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/train_text_model.py --data examples/sample_text.txt --epochs 10 --batch-size 32 --lr 0.05 --save trained_model.json; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "Training completed! Model saved to trained_model.json"
	@echo "========================================================================"
# Generate text using the weights produced by text-gen-train. Fails fast with
# a helpful message when trained_model.json has not been created yet.
text-gen-trained: python-dev
	@echo "========================================================================"
	@echo "Text Generation with Trained Weights"
	@echo "========================================================================"
	@echo ""
	@if [ ! -f trained_model.json ]; then \
		echo "Error: trained_model.json not found. Run 'make text-gen-train' first."; \
		exit 1; \
	fi
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/text_generation_gpt.py --weights trained_model.json --prompt "The model" --tokens 50 --temperature 0.8 --backend cpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/text_generation_gpt.py --weights trained_model.json --prompt "The model" --tokens 50 --temperature 0.8 --backend cpu; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "Generation with trained weights completed!"
	@echo "========================================================================"
# Run the enhanced text-generation example with the KV cache enabled
# (CPU backend only).
text-gen-enhanced: python-dev
	@echo "========================================================================"
	@echo "Enhanced Text Generation with KV Cache"
	@echo "========================================================================"
	@echo ""
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/text_generation_enhanced.py --use-kv-cache --prompt "Hello world" --tokens 30 --temperature 0.8 --backend cpu; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/text_generation_enhanced.py --use-kv-cache --prompt "Hello world" --tokens 30 --temperature 0.8 --backend cpu; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "Enhanced generation with KV cache completed!"
	@echo "========================================================================"
# Minimal single-phrase training demo (overfits one phrase to show the loss
# decreasing).
text-gen-train-simple: python-dev
	@echo "========================================================================"
	@echo "Simple Training Demo That Actually Works!"
	@echo "========================================================================"
	@echo ""
	@if [ -f .venv-webnn/bin/python ]; then \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) .venv-webnn/bin/python examples/train_simple_demo.py --phrase "the quick brown fox" --epochs 100 --lr 0.15; \
	else \
		DYLD_LIBRARY_PATH=$(ORT_LIB_DIR) python examples/train_simple_demo.py --phrase "the quick brown fox" --epochs 100 --lr 0.15; \
	fi
	@echo ""
	@echo "========================================================================"
	@echo "Simple training completed - loss decreased!"
	@echo "========================================================================"
# Remove Python build/test artifacts.
# NOTE(review): the `find . ... -delete` lines sweep the ENTIRE repo, so any
# *.so/*.pyd outside Python build output (e.g. vendored libs under target/ or
# the extracted ONNX Runtime) would also be deleted -- confirm this is intended.
python-clean:
	@echo "Cleaning Python artifacts..."
	rm -rf target/wheels
	rm -rf python/webnn/__pycache__
	rm -rf tests/__pycache__
	rm -rf examples/__pycache__
	find . -name "*.pyc" -delete
	find . -name "*.so" -delete
	find . -name "*.pyd" -delete
# Serve the docs locally with live reload, installing mkdocs on demand from
# docs/requirements.txt if it is not already on PATH.
docs-serve:
	@echo "Serving documentation with live reload..."
	@command -v mkdocs >/dev/null 2>&1 || { echo "Installing mkdocs..."; pip install -r docs/requirements.txt; }
	mkdocs serve
# Build the static documentation site (output goes to site/), installing
# mkdocs on demand if missing.
docs-build:
	@echo "Building documentation..."
	@command -v mkdocs >/dev/null 2>&1 || { echo "Installing mkdocs..."; pip install -r docs/requirements.txt; }
	mkdocs build
# CI variant of docs-build: --strict turns mkdocs warnings (broken links,
# missing pages) into build failures.
ci-docs:
	@echo "Building documentation in strict mode (CI)..."
	@command -v mkdocs >/dev/null 2>&1 || { echo "Installing mkdocs..."; pip install -r docs/requirements.txt; }
	mkdocs build --strict
# Remove the generated documentation site.
docs-clean:
	@echo "Cleaning documentation build artifacts..."
	rm -rf site/
# Clean everything: Rust artifacts, Python artifacts, and built docs.
clean-all: clean python-clean docs-clean
	@echo "All build artifacts cleaned."
# Print an overview of all user-facing targets.
# Fix: added entries for existing targets that were missing from the listing
# (python-test-wpt-onnx, python-test-wpt-coreml, webnn-venv).
help:
	@echo "Rust WebNN Graph - Available Targets"
	@echo "===================================="
	@echo ""
	@echo "Rust Targets:"
	@echo "  build              - Build the Rust project"
	@echo "  test               - Run Rust tests"
	@echo "  fmt                - Format Rust code"
	@echo "  run                - Run with sample graph"
	@echo "  clean              - Clean Rust build artifacts"
	@echo ""
	@echo "Visualization:"
	@echo "  viz                - Generate and open graph visualization"
	@echo ""
	@echo "ONNX Conversion:"
	@echo "  onnxruntime-download - Download ONNX Runtime"
	@echo "  onnx               - Convert graph to ONNX format"
	@echo "  onnx-validate      - Convert and validate ONNX graph"
	@echo ""
	@echo "CoreML Conversion:"
	@echo "  coreml             - Convert graph to CoreML format"
	@echo "  coreml-validate    - Convert and validate CoreML graph"
	@echo ""
	@echo "Python API:"
	@echo "  python-dev         - Install Python package in development mode"
	@echo "  python-build       - Build Python wheel"
	@echo "  python-test        - Run all Python tests (includes WPT tests)"
	@echo "  python-test-wpt    - Run WPT conformance tests only"
	@echo "  python-test-wpt-onnx - Run WPT conformance tests (ONNX backend only)"
	@echo "  python-test-wpt-coreml - Run WPT conformance tests (CoreML backend only)"
	@echo "  python-perf        - Run quick performance benchmarks"
	@echo "  python-perf-full   - Run full performance benchmark suite"
	@echo "  python-example     - Run Python examples"
	@echo "  mobilenet-demo     - Run MobileNetV2 classifier on all 3 backends"
	@echo "  text-gen-demo      - Run basic text generation with attention"
	@echo "  text-gen-train     - Train text generation model on sample data"
	@echo "  text-gen-trained   - Generate text using trained model weights"
	@echo "  text-gen-enhanced  - Run enhanced version with KV cache"
	@echo "  text-gen-train-simple - Simple training that actually works!"
	@echo "  webnn-venv         - Create venv with the onnx2webnn tool"
	@echo "  python-clean       - Clean Python artifacts"
	@echo ""
	@echo "Documentation:"
	@echo "  docs-serve         - Serve documentation with live reload"
	@echo "  docs-build         - Build static documentation site"
	@echo "  ci-docs            - Build documentation in strict mode (CI)"
	@echo "  docs-clean         - Clean documentation artifacts"
	@echo ""
	@echo "Comprehensive:"
	@echo "  validate-all-env   - Run full pipeline (build/test/convert/validate)"
	@echo "  clean-all          - Clean all artifacts (Rust + Python + docs)"
	@echo "  help               - Show this help message"
	@echo ""