# CRUX-A-01 — Pull-model short-name alias map.
# Parity target: `ollama pull <short>` resolves canonical short names via
# https://ollama.com/library. aprender parity: `apr pull <short>` resolves
# via this file, shipped as a release asset and read at startup.
#
# Schema: str → str
# key: canonical short name, no scheme prefix
# value: fully-qualified URL (hf:// or https://)
#
# Invariants (enforced by CRUX-A-01 v1.1.0 FALSIFY-002):
# - {"llama3", "mistral", "phi3", "qwen2"} ⊆ keys(map)
# - each value matches regex ^(hf|https)://
# - resolution is deterministic across invocations
---
# Meta Llama family.
llama2: hf://meta-llama/Llama-2-7b-chat-hf
llama3: hf://meta-llama/Meta-Llama-3-8B-Instruct
# Mistral AI.
mistral: hf://mistralai/Mistral-7B-Instruct-v0.3
# Microsoft Phi.
phi3: hf://microsoft/Phi-3-mini-4k-instruct
# Alibaba Qwen family (qwen2 is a FALSIFY-002 required key; see header).
qwen2: hf://Qwen/Qwen2-7B-Instruct
qwen2.5: hf://Qwen/Qwen2.5-7B-Instruct
qwen2.5-coder: hf://Qwen/Qwen2.5-Coder-7B-Instruct
# Recommended default for `apr code` on a 24 GB GPU (e.g. RTX 4090):
# 30B-A3B MoE (3B active per token) at Q4_K_M ~= 17-19 GB; 73-87 tok/s
# on RTX 4090; 50.3% SWE-Bench Verified; 256K native context. See
# `aprender-orchestrate/src/agent/manifest.rs::is_preferred_default_model`.
qwen3-coder: hf://unsloth/Qwen3-Coder-30B-A3B-Instruct-GGUF
# Google Gemma family.
gemma: hf://google/gemma-7b-it
gemma2: hf://google/gemma-2-9b-it