brainharmony 0.1.0

Brain-Harmony: a multimodal brain foundation model — inference implemented in Rust with the Burn ML framework
Documentation
#!/usr/bin/env python3
"""Generate benchmark comparison chart: Python vs Rust for Brain-Harmony."""
from pathlib import Path

import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np

# Benchmark results — 10 runs, best/median, Apple M4 Pro
# After tiled attention optimization (tile=1024)
# Each row: (bar label, best-of-10 seconds, median seconds, bar color)
_ROWS = [
    ("Python MPS\n(Apple GPU)",    2.2, 2.4, "#0F9D58"),
    ("Python CPU\n(PyTorch 2.11)", 3.6, 3.8, "#4285F4"),
    ("Rust wgpu f16\n(Metal GPU)", 4.8, 4.8, "#7B1FA2"),
    ("Rust wgpu f32\n(Metal GPU)", 6.4, 6.8, "#CE93D8"),
    ("Rust Accelerate\n(Apple CPU)", 45.4, 45.7, "#F4B400"),
]

# Unpack the row table into the parallel lists the plotting code consumes.
labels = [row[0] for row in _ROWS]
best_s = [row[1] for row in _ROWS]
med_s = [row[2] for row in _ROWS]
colors = [row[3] for row in _ROWS]

baseline = best_s[1]  # Python CPU

# Build a horizontal bar chart of best-of-10 inference times, one bar per backend.
fig, ax = plt.subplots(figsize=(11, 4.5))
fig.patch.set_facecolor("white")

y_pos = np.arange(len(labels))
bars = ax.barh(y_pos, best_s, color=colors, edgecolor="white", height=0.52, zorder=3)

# Annotate each bar with best/median times and a speed ratio vs the Python CPU baseline.
for i, (bar, bs, ms) in enumerate(zip(bars, best_s, med_s)):
    w = bar.get_width()
    ratio = baseline / bs
    # A ~5% dead band around 1.0 avoids labelling near-ties as faster/slower.
    if ratio > 1.05:
        ratio_str = f"{ratio:.1f}x faster"
    elif ratio < 0.95:
        ratio_str = f"{1/ratio:.1f}x slower"
    else:
        ratio_str = "baseline"

    text = f"  {bs:.1f}s  (med {ms:.1f}s)  {ratio_str}"

    # Long bars get white text inside the bar; short bars get dark text to the right.
    if w > 25:
        ax.text(w - 0.3, i, text, va="center", ha="right",
                fontsize=9.5, fontweight="bold", color="white", family="monospace")
    else:
        ax.text(w + 0.3, i, text, va="center", ha="left",
                fontsize=9.5, fontweight="bold", color="#333", family="monospace")

# Dashed reference line marking the Python CPU baseline time.
ax.axvline(x=baseline, color="#4285F4", linestyle="--", alpha=0.35, linewidth=1.2, zorder=2)
ax.text(baseline + 0.2, -0.45, "Python CPU", fontsize=8, color="#4285F4", alpha=0.7, va="top")

ax.set_yticks(y_pos)
ax.set_yticklabels(labels, fontsize=10, fontweight="bold")
ax.set_xlabel("Inference time (seconds) — lower is better", fontsize=11)
ax.set_title("Brain-Harmony ViT-Base Encoder  |  12L / 768d / 7200 patches  |  Apple M4 Pro",
             fontsize=11.5, fontweight="bold", pad=10)
ax.invert_yaxis()  # fastest backend on top
ax.set_xlim(0, max(best_s) * 1.32)  # headroom so the annotation text fits
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%.0fs'))
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.grid(axis="x", alpha=0.12, zorder=1)

plt.tight_layout()
# Ensure the output directory exists — savefig raises FileNotFoundError otherwise —
# and report the path that was actually written (the old message claimed a relative path).
out_path = Path("/Users/Shared/brainharmony-rs/figures/benchmark.png")
out_path.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(out_path,
            dpi=200, bbox_inches="tight", facecolor="white", edgecolor="none")
print(f"Saved {out_path}")