//! Model registry: a built-in catalog of GGUF models plus helpers to
//! download, locate, and remove them on disk.

use std::path::{Path, PathBuf};

use serde::{Deserialize, Serialize};
use tracing::info;

use crate::InferenceError;

/// Functional role a model plays in the system, serialized in snake_case
/// (e.g. `ModelRole::Embedding` becomes `"embedding"`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ModelRole {
    /// Smallest chat model; fastest and lowest memory footprint.
    Small,
    /// Mid-sized chat model balancing quality and speed.
    Medium,
    /// Larger chat model for higher-quality generation.
    Large,
    /// Highest-capability model (e.g. mixture-of-experts).
    Expert,
    /// Embedding model producing vector representations, not text.
    Embedding,
}
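
// Illustrative serde check, not part of the original module (assumption:
// serde_json is available as a dev-dependency). With `rename_all =
// "snake_case"`, variants serialize as lowercase strings.
#[cfg(test)]
mod role_serde_sketch {
    use super::ModelRole;

    #[test]
    fn role_serializes_in_snake_case() {
        let json = serde_json::to_string(&ModelRole::Embedding).unwrap();
        assert_eq!(json, "\"embedding\"");
    }
}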

/// Catalog entry as exposed to callers, including local download state.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelInfo {
    pub name: String,
    pub hf_repo: String,
    pub hf_filename: String,
    pub tokenizer_repo: String,
    pub role: ModelRole,
    /// Human-readable parameter count, e.g. "1.7B".
    pub param_count: String,
    /// Approximate on-disk size of the quantized weights.
    pub quantized_size_mb: u64,
    /// Whether model.gguf is already present in the local models directory.
    pub downloaded: bool,
}

/// Registry over the built-in model catalog, rooted at a local models directory.
pub struct ModelRegistry {
    models_dir: PathBuf,
    catalog: Vec<ModelSpec>,
}

/// Static catalog entry; `ModelInfo` is the owned, serializable view of this.
struct ModelSpec {
    name: &'static str,
    hf_repo: &'static str,
    hf_filename: &'static str,
    tokenizer_repo: &'static str,
    role: ModelRole,
    param_count: &'static str,
    quantized_size_mb: u64,
}

impl ModelRegistry {
    pub fn new(models_dir: PathBuf) -> Self {
        Self {
            models_dir,
            catalog: builtin_catalog(),
        }
    }

    /// List every catalog entry, checking the filesystem for download state.
    pub fn list_models(&self) -> Vec<ModelInfo> {
        self.catalog
            .iter()
            .map(|spec| {
                let local_path = self.models_dir.join(spec.name).join("model.gguf");
                ModelInfo {
                    name: spec.name.to_string(),
                    hf_repo: spec.hf_repo.to_string(),
                    hf_filename: spec.hf_filename.to_string(),
                    tokenizer_repo: spec.tokenizer_repo.to_string(),
                    role: spec.role,
                    param_count: spec.param_count.to_string(),
                    quantized_size_mb: spec.quantized_size_mb,
                    downloaded: local_path.exists(),
                }
            })
            .collect()
    }

    /// Case-insensitive lookup into the catalog.
    fn find_spec(&self, name: &str) -> Option<&ModelSpec> {
        self.catalog
            .iter()
            .find(|s| s.name.eq_ignore_ascii_case(name))
    }

    /// Ensure the named model's weights and tokenizer exist locally,
    /// downloading whichever pieces are missing, and return the model directory.
    pub async fn ensure_model(&self, name: &str) -> Result<PathBuf, InferenceError> {
        let spec = self.find_spec(name)
            .ok_or_else(|| InferenceError::ModelNotFound(name.to_string()))?;

        let model_dir = self.models_dir.join(spec.name);
        let model_path = model_dir.join("model.gguf");
        let tokenizer_path = model_dir.join("tokenizer.json");

        if model_path.exists() && tokenizer_path.exists() {
            return Ok(model_dir);
        }

        std::fs::create_dir_all(&model_dir)?;

        if !model_path.exists() {
            info!(model = spec.name, repo = spec.hf_repo, "downloading model weights");
            download_file(spec.hf_repo, spec.hf_filename, &model_path).await?;
        }

        if !tokenizer_path.exists() {
            info!(model = spec.name, repo = spec.tokenizer_repo, "downloading tokenizer");
            download_file(spec.tokenizer_repo, "tokenizer.json", &tokenizer_path).await?;
        }

        Ok(model_dir)
    }

    /// Delete a model's local directory (weights and tokenizer), if present.
    pub fn remove_model(&self, name: &str) -> Result<(), InferenceError> {
        let spec = self.find_spec(name)
            .ok_or_else(|| InferenceError::ModelNotFound(name.to_string()))?;

        // Use the canonical catalog name so the path matches the directory
        // created by ensure_model, even if `name` differs in case.
        let model_dir = self.models_dir.join(spec.name);
        if model_dir.exists() {
            std::fs::remove_dir_all(&model_dir)?;
            info!(model = spec.name, "removed model");
        }
        Ok(())
    }
}
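
// A minimal usage sketch, not part of the original module: construct a
// registry, inspect the catalog, and fetch one model. The models path is
// hypothetical, and a tokio runtime is assumed for the async call.
#[allow(dead_code)]
async fn usage_sketch() -> Result<(), InferenceError> {
    let registry = ModelRegistry::new(PathBuf::from("/tmp/models"));

    // `downloaded` reflects whether model.gguf already exists on disk.
    for model in registry.list_models() {
        println!("{} ({}): downloaded = {}", model.name, model.param_count, model.downloaded);
    }

    // Downloads weights and tokenizer on first use; later calls are no-ops.
    let model_dir = registry.ensure_model("Qwen3-0.6B").await?;
    println!("model ready at {}", model_dir.display());
    Ok(())
}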

/// Fetch `filename` from the given Hugging Face repo into `dest`.
///
/// hf-hub downloads into its own cache; the cached file is then symlinked
/// into place where possible (Unix), falling back to a copy.
async fn download_file(repo: &str, filename: &str, dest: &Path) -> Result<(), InferenceError> {
    let api = hf_hub::api::tokio::Api::new()
        .map_err(|e| InferenceError::DownloadFailed(e.to_string()))?;

    let repo = api.model(repo.to_string());
    let path = repo
        .get(filename)
        .await
        .map_err(|e| InferenceError::DownloadFailed(format!("{filename}: {e}")))?;

    if dest.exists() {
        return Ok(());
    }

    #[cfg(unix)]
    {
        if std::os::unix::fs::symlink(&path, dest).is_ok() {
            return Ok(());
        }
    }

    std::fs::copy(&path, dest)
        .map_err(|e| InferenceError::DownloadFailed(format!("copy to {}: {e}", dest.display())))?;
    Ok(())
}

/// Built-in catalog of Qwen3 GGUF builds, spanning embedding, small, medium,
/// large, and mixture-of-experts chat roles.
fn builtin_catalog() -> Vec<ModelSpec> {
    vec![
        ModelSpec {
            name: "Qwen3-Embedding-0.6B",
            hf_repo: "Qwen/Qwen3-Embedding-0.6B-GGUF",
            hf_filename: "Qwen3-Embedding-0.6B-Q8_0.gguf",
            tokenizer_repo: "Qwen/Qwen3-Embedding-0.6B",
            role: ModelRole::Embedding,
            param_count: "0.6B",
            quantized_size_mb: 639,
        },
        ModelSpec {
            name: "Qwen3-0.6B",
            hf_repo: "Qwen/Qwen3-0.6B-GGUF",
            hf_filename: "Qwen3-0.6B-Q8_0.gguf",
            tokenizer_repo: "Qwen/Qwen3-0.6B",
            role: ModelRole::Small,
            param_count: "0.6B",
            quantized_size_mb: 650,
        },
        ModelSpec {
            name: "Qwen3-1.7B",
            hf_repo: "Qwen/Qwen3-1.7B-GGUF",
            hf_filename: "Qwen3-1.7B-Q8_0.gguf",
            tokenizer_repo: "Qwen/Qwen3-1.7B",
            role: ModelRole::Medium,
            param_count: "1.7B",
            quantized_size_mb: 1800,
        },
        ModelSpec {
            name: "Qwen3-4B",
            hf_repo: "Qwen/Qwen3-4B-GGUF",
            hf_filename: "Qwen3-4B-Q4_K_M.gguf",
            tokenizer_repo: "Qwen/Qwen3-4B",
            role: ModelRole::Medium,
            param_count: "4B",
            quantized_size_mb: 2500,
        },
        ModelSpec {
            name: "Qwen3-8B",
            hf_repo: "Qwen/Qwen3-8B-GGUF",
            hf_filename: "Qwen3-8B-Q4_K_M.gguf",
            tokenizer_repo: "Qwen/Qwen3-8B",
            role: ModelRole::Large,
            param_count: "8B",
            quantized_size_mb: 4900,
        },
        ModelSpec {
            name: "Qwen3-30B-A3B",
            hf_repo: "Qwen/Qwen3-30B-A3B-GGUF",
            hf_filename: "Qwen3-30B-A3B-Q4_K_M.gguf",
            tokenizer_repo: "Qwen/Qwen3-30B-A3B",
            role: ModelRole::Expert,
            param_count: "30B (3B active)",
            quantized_size_mb: 17000,
        },
    ]
}
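
// Hedged sanity tests, not from the original source: they exercise only the
// in-memory catalog and the filesystem check, never the network. The temp
// paths are hypothetical and assumed not to contain model files.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn catalog_lookup_is_case_insensitive() {
        let registry = ModelRegistry::new(std::env::temp_dir().join("registry-test"));
        assert!(registry.find_spec("qwen3-0.6b").is_some());
        assert!(registry.find_spec("no-such-model").is_none());
    }

    #[test]
    fn fresh_dir_reports_nothing_downloaded() {
        let registry = ModelRegistry::new(std::env::temp_dir().join("registry-test-empty"));
        assert!(registry.list_models().iter().all(|m| !m.downloaded));
    }
}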