realizar 0.8.5

Pure Rust ML inference engine built from scratch - model serving for GGUF and safetensors

/// Process a batch of requests (PARITY-053)
///
/// Processes every request in the batch and sends each result back through its
/// oneshot channel. Large batches with a warm GPU cache run through true GPU
/// batch inference; smaller batches fall back to concurrent single-request
/// processing via `tokio::spawn`.
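///
/// A minimal sketch of how a dispatcher might hand work to this function
/// (the channel wiring and surrounding setup are assumptions; field names
/// match `ContinuousBatchRequest` as used below):
///
/// ```ignore
/// let (response_tx, response_rx) = tokio::sync::oneshot::channel();
/// let mut batch = vec![ContinuousBatchRequest {
///     prompt_tokens: vec![1, 2, 3],
///     max_tokens: 16,
///     temperature: 0.8,
///     top_k: 40,
///     response_tx,
/// }];
/// process_batch(&model, &config, &mut batch).await;
/// let response = response_rx.await?;
/// assert_eq!(response.prompt_len, 3);
/// ```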
#[cfg(feature = "gpu")]
async fn process_batch(
    model: &std::sync::Arc<crate::gguf::OwnedQuantizedModelCachedSync>,
    config: &BatchConfig,
    batch: &mut Vec<ContinuousBatchRequest>,
) {
    use std::time::Instant;

    if batch.is_empty() {
        return;
    }

    let batch_size = batch.len();
    let batch_start = Instant::now();

    // PARITY-095: Use configurable GPU batch threshold
    // GPU GEMM wins at batch >= gpu_threshold (default 32, from IMP-600 analysis)
    let gpu_threshold = config.gpu_threshold;

    // Use true GPU batch inference if batch is large enough and GPU cache is warm
    if batch_size >= gpu_threshold && model.is_gpu_cache_warm() {
        // PARITY-094: True batch inference with GPU FFN
        // Collect all prompts
        let prompts: Vec<Vec<u32>> = batch.iter().map(|r| r.prompt_tokens.clone()).collect();

        // Use first request's config (batch inference assumes similar parameters)
        let first = &batch[0];
        let gen_config = crate::gguf::QuantizedGenerateConfig {
            max_tokens: first.max_tokens,
            temperature: first.temperature,
            top_k: first.top_k,
            stop_tokens: Vec::new(),
            trace: false,
            ..Default::default()
        };

        // Run batch generation with GPU FFN (PARITY-021)
        let results = model.batch_generate_gpu(&prompts, &gen_config);

        let total_latency_ms = batch_start.elapsed().as_secs_f64() * 1000.0;
        let per_request_latency_ms = total_latency_ms / batch_size as f64;

        // Send responses
        match results {
            Ok(all_token_ids) => {
                for (request, token_ids) in batch.drain(..).zip(all_token_ids.into_iter()) {
                    let response = ContinuousBatchResponse {
                        token_ids,
                        prompt_len: request.prompt_tokens.len(),
                        batched: true,
                        batch_size,
                        latency_ms: per_request_latency_ms,
                    };
                    let _ = request.response_tx.send(response);
                }
            },
            Err(_) => {
                // Fallback: return prompts unchanged on error
                for request in batch.drain(..) {
                    let response = ContinuousBatchResponse {
                        token_ids: request.prompt_tokens.clone(),
                        prompt_len: request.prompt_tokens.len(),
                        batched: false,
                        batch_size,
                        latency_ms: per_request_latency_ms,
                    };
                    let _ = request.response_tx.send(response);
                }
            },
        }
    } else {
        // Concurrent single-request processing (for small batches or no GPU cache)
        let mut handles = Vec::with_capacity(batch_size);

        for request in batch.drain(..) {
            let model = model.clone();
            let handle = tokio::spawn(async move {
                let start = Instant::now();

                // Build generation config
                let gen_config = crate::gguf::QuantizedGenerateConfig {
                    max_tokens: request.max_tokens,
                    temperature: request.temperature,
                    top_k: request.top_k,
                    stop_tokens: Vec::new(),
                    trace: false,
                    ..Default::default()
                };

                // Generate
                let result = model.generate_with_cache(&request.prompt_tokens, &gen_config);

                let latency_ms = start.elapsed().as_secs_f64() * 1000.0;

                // Send response
                let response = match result {
                    Ok(token_ids) => ContinuousBatchResponse {
                        token_ids,
                        prompt_len: request.prompt_tokens.len(),
                        batched: false,
                        batch_size: 1,
                        latency_ms,
                    },
                    Err(_) => ContinuousBatchResponse {
                        token_ids: request.prompt_tokens.clone(),
                        prompt_len: request.prompt_tokens.len(),
                        batched: false,
                        batch_size: 1,
                        latency_ms,
                    },
                };

                // Send response (ignore if receiver dropped)
                let _ = request.response_tx.send(response);
            });

            handles.push(handle);
        }

        // Wait for all to complete
        for handle in handles {
            let _ = handle.await;
        }
    }
}

/// GPU warmup handler (PARITY-022)
/// POST /v1/gpu/warmup - warm up the GPU cache for batch inference
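///
/// A sketch of the router wiring for this endpoint (the `AppState`
/// construction is assumed to happen elsewhere in the crate):
///
/// ```ignore
/// use axum::{routing::post, Router};
///
/// let app = Router::new()
///     .route("/v1/gpu/warmup", post(gpu_warmup_handler))
///     .with_state(state);
/// ```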
#[cfg(feature = "gpu")]
pub async fn gpu_warmup_handler(
    State(state): State<AppState>,
) -> Result<Json<GpuWarmupResponse>, (StatusCode, Json<ErrorResponse>)> {
    if let Some(cached_model) = state.cached_model() {
        match cached_model.warmup_gpu_cache() {
            Ok((memory_bytes, num_layers)) => Ok(Json(GpuWarmupResponse {
                success: true,
                memory_bytes,
                num_layers,
                message: format!(
                    "GPU cache warmed up: {} layers, {:.2} GB",
                    num_layers,
                    memory_bytes as f64 / 1e9
                ),
            })),
            Err(e) => Err((
                StatusCode::INTERNAL_SERVER_ERROR,
                Json(ErrorResponse {
                    error: format!("GPU warmup failed: {e}"),
                }),
            )),
        }
    } else {
        Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "No GPU-capable model loaded. Use with_cached_model() to enable."
                    .to_string(),
            }),
        ))
    }
}

/// GPU warmup handler stub for non-GPU builds
#[cfg(not(feature = "gpu"))]
pub async fn gpu_warmup_handler(
    State(_state): State<AppState>,
) -> Result<Json<GpuWarmupResponse>, (StatusCode, Json<ErrorResponse>)> {
    Err((
        StatusCode::SERVICE_UNAVAILABLE,
        Json(ErrorResponse {
            error: "GPU feature not enabled. Build with --features gpu".to_string(),
        }),
    ))
}

/// GPU status handler (PARITY-022)
/// GET /v1/gpu/status - Check GPU cache status
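///
/// A sketch of the router wiring and the response fields (values shown are
/// illustrative; field names mirror `GpuStatusResponse` as built below):
///
/// ```ignore
/// use axum::{routing::get, Router};
///
/// let app = Router::new()
///     .route("/v1/gpu/status", get(gpu_status_handler))
///     .with_state(state);
/// // Example body: { "cache_ready": true, "cache_memory_bytes": 1073741824,
/// //                 "batch_threshold": 32, "recommended_min_batch": 32 }
/// ```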
#[cfg(feature = "gpu")]
pub async fn gpu_status_handler(
    State(state): State<AppState>,
) -> Result<Json<GpuStatusResponse>, (StatusCode, Json<ErrorResponse>)> {
    if let Some(cached_model) = state.cached_model() {
        Ok(Json(GpuStatusResponse {
            cache_ready: cached_model.is_gpu_cache_warm(),
            cache_memory_bytes: cached_model.gpu_cache_memory(),
            batch_threshold: 32, // GPU GEMM threshold from IMP-600
            recommended_min_batch: 32,
        }))
    } else {
        Ok(Json(GpuStatusResponse {
            cache_ready: false,
            cache_memory_bytes: 0,
            batch_threshold: 32,
            recommended_min_batch: 32,
        }))
    }
}

/// GPU status handler stub for non-GPU builds
#[cfg(not(feature = "gpu"))]
pub async fn gpu_status_handler(
    State(_state): State<AppState>,
) -> Result<Json<GpuStatusResponse>, (StatusCode, Json<ErrorResponse>)> {
    Ok(Json(GpuStatusResponse {
        cache_ready: false,
        cache_memory_bytes: 0,
        batch_threshold: 32,
        recommended_min_batch: 32,
    }))
}

/// GPU batch completions handler (PARITY-022)
/// POST /v1/batch/completions - GPU-accelerated batch inference
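///
/// A sketch of a request body as this handler reads it (field names are
/// taken from the usages below; serde attributes and defaults are assumed):
///
/// ```ignore
/// let request = GpuBatchRequest {
///     prompts: vec!["first prompt".to_string(), "second prompt".to_string()],
///     max_tokens: 32,
///     temperature: 0.7,
///     top_k: 40,
/// };
/// // Batches of 32+ prompts take the GPU path when the cache is warm;
/// // smaller batches fall back to sequential CPU generation.
/// ```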
#[cfg(feature = "gpu")]
pub async fn gpu_batch_completions_handler(
    State(state): State<AppState>,
    Json(request): Json<GpuBatchRequest>,
) -> Result<Json<GpuBatchResponse>, (StatusCode, Json<ErrorResponse>)> {
    use std::time::Instant;

    if request.prompts.is_empty() {
        return Err((
            StatusCode::BAD_REQUEST,
            Json(ErrorResponse {
                error: "Prompts array cannot be empty".to_string(),
            }),
        ));
    }

    let Some(cached_model) = state.cached_model() else {
        return Err((
            StatusCode::SERVICE_UNAVAILABLE,
            Json(ErrorResponse {
                error: "No GPU-capable model loaded".to_string(),
            }),
        ));
    };

    // Check if GPU cache is ready
    let gpu_ready = cached_model.is_gpu_cache_warm();
    let batch_size = request.prompts.len();

    // Tokenize all prompts
    // For GPU batch, we need token IDs as Vec<Vec<u32>>
    let prompts_tokens: Vec<Vec<u32>> = request
        .prompts
        .iter()
        .map(|p| {
            // Placeholder byte-level tokenization: one token per byte.
            // This does not go through the model's vocabulary; production
            // callers should run a real tokenizer here.
            p.bytes().map(|b| b as u32).collect()
        })
        .collect();

    // Create generation config
    let gen_config = crate::gguf::QuantizedGenerateConfig {
        max_tokens: request.max_tokens,
        temperature: request.temperature,
        top_k: request.top_k,
        stop_tokens: vec![],
        trace: false,
        ..Default::default()
    };

    let start = Instant::now();

    // Decide GPU vs CPU path based on cache readiness and batch size
    let gpu_threshold = 32;
    let use_gpu = gpu_ready && batch_size >= gpu_threshold;

    let results = if use_gpu {
        // GPU batch inference path
        match cached_model.batch_generate_gpu(&prompts_tokens, &gen_config) {
            Ok(generated) => generated,
            Err(e) => {
                return Err((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Json(ErrorResponse {
                        error: format!("GPU batch generation failed: {e}"),
                    }),
                ));
            },
        }
    } else {
        // CPU sequential path (fallback)
        let mut results = Vec::with_capacity(batch_size);
        for prompt in &prompts_tokens {
            match cached_model.generate_with_cache(prompt, &gen_config) {
                Ok(tokens) => results.push(tokens),
                Err(e) => {
                    return Err((
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Json(ErrorResponse {
                            error: format!("Generation failed: {e}"),
                        }),
                    ));
                },
            }
        }
        results
    };

    let elapsed = start.elapsed();
    let total_tokens: usize = results.iter().map(Vec::len).sum();
    let throughput_tps = total_tokens as f64 / elapsed.as_secs_f64();

    // Build response
    let batch_results: Vec<GpuBatchResult> = results
        .into_iter()
        .enumerate()
        .map(|(idx, tokens)| {
            let prompt_len = prompts_tokens.get(idx).map_or(0, Vec::len);
            let num_generated = tokens.len().saturating_sub(prompt_len);
            GpuBatchResult {
                index: idx,
                token_ids: tokens.clone(),
                // Byte-level detokenization mirrors the placeholder tokenizer above
                text: tokens.iter().map(|&t| t as u8 as char).collect(),
                num_generated,
            }
        })
        .collect();

    Ok(Json(GpuBatchResponse {
        results: batch_results,
        stats: GpuBatchStats {
            batch_size,
            gpu_used: use_gpu,
            total_tokens,
            processing_time_ms: elapsed.as_secs_f64() * 1000.0,
            throughput_tps,
        },
    }))
}

/// GPU batch completions handler stub for non-GPU builds
#[cfg(not(feature = "gpu"))]
pub async fn gpu_batch_completions_handler(
    State(_state): State<AppState>,
    Json(_request): Json<GpuBatchRequest>,
) -> Result<Json<GpuBatchResponse>, (StatusCode, Json<ErrorResponse>)> {
    Err((
        StatusCode::SERVICE_UNAVAILABLE,
        Json(ErrorResponse {
            error: "GPU feature not enabled. Build with --features gpu".to_string(),
        }),
    ))
}

/// Models list handler - returns available models in multi-model mode
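///
/// A sketch of the route wiring (the `/v1/models` path is an assumption,
/// following the OpenAI-style paths used elsewhere in this file):
///
/// ```ignore
/// use axum::{routing::get, Router};
///
/// let app = Router::new()
///     .route("/v1/models", get(models_handler))
///     .with_state(state);
/// ```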
pub async fn models_handler(
    State(state): State<AppState>,
) -> Result<Json<ModelsResponse>, (StatusCode, Json<ErrorResponse>)> {
    if let Some(registry) = &state.registry {
        let models = registry.list();
        Ok(Json(ModelsResponse { models }))
    } else {
        // Single model mode - return the single model info
        Ok(Json(ModelsResponse {
            models: vec![ModelInfo {
                id: "default".to_string(),
                name: "Default Model".to_string(),
                description: "Single model deployment".to_string(),
                format: "unknown".to_string(),
                loaded: true,
            }],
        }))
    }
}

/// Tokenize text handler
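///
/// A sketch of the request/response shapes as this handler uses them
/// (field names follow the code below; `model_id` may be `None` to use the
/// default model):
///
/// ```ignore
/// let request = TokenizeRequest {
///     text: "hello world".to_string(),
///     model_id: None,
/// };
/// // -> TokenizeResponse { token_ids, num_tokens }
/// ```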
pub async fn tokenize_handler(
    State(state): State<AppState>,
    Json(request): Json<TokenizeRequest>,
) -> Result<Json<TokenizeResponse>, (StatusCode, Json<ErrorResponse>)> {
    let (_model, tokenizer) = state.get_model(request.model_id.as_deref()).map_err(|e| {
        (
            StatusCode::NOT_FOUND,
            Json(ErrorResponse {
                error: e.to_string(),
            }),
        )
    })?;

    let token_ids = tokenizer.encode(&request.text);
    let num_tokens = token_ids.len();

    Ok(Json(TokenizeResponse {
        token_ids,
        num_tokens,
    }))
}

// ── generate_handler backend dispatch ────────────────────────────────

/// Attempt CUDA-backed generation for a single request.
///
/// Returns `Ok(None)` when no CUDA model is loaded so the caller can fall
/// through to the next backend.
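///
/// A sketch of how a dispatching `generate_handler` might use this helper
/// (the surrounding handler wiring is an assumption):
///
/// ```ignore
/// if let Some(response) = try_cuda_generate(&state, &request)? {
///     return Ok(Json(response));
/// }
/// // otherwise fall through to the next available backend
/// ```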
#[cfg(feature = "cuda")]
fn try_cuda_generate(
    state: &AppState,
    request: &GenerateRequest,
) -> Result<Option<GenerateResponse>, ApiErr> {
    use crate::gguf::QuantizedGenerateConfig;

    let Some(cuda_model_lock) = state.cuda_model() else {
        return Ok(None);
    };
    let tokenizer = require_tok(state)?;
    let prompt_ids = tokenize_prompt(&tokenizer, &request.prompt)?;
    let prompt_tokens = prompt_ids.len();

    let q_config = QuantizedGenerateConfig {
        max_tokens: request.max_tokens,
        temperature: request.temperature,
        top_k: if request.temperature == 0.0 {
            1
        } else {
            request.top_k
        },
        stop_tokens: vec![eos_id(&tokenizer, state.model_eos_token_id())],
        trace: false,
        ..Default::default()
    };

    let mut cuda_model = cuda_model_lock.write().map_err(|_| {
        api_err(
            StatusCode::INTERNAL_SERVER_ERROR,
            "Failed to acquire CUDA model lock",
        )
    })?;
    let generated = cuda_model
        .generate_gpu_resident(&prompt_ids, &q_config)
        .map_err(|e| {
            api_err(
                StatusCode::INTERNAL_SERVER_ERROR,
                format!("CUDA generation failed: {e}"),
            )
        })?;
    let text = tokenizer
        .decode(&generated)
        .map_err(|e| api_err(StatusCode::INTERNAL_SERVER_ERROR, e))?;

    Ok(Some(GenerateResponse {
        num_generated: generated.len().saturating_sub(prompt_tokens),
        token_ids: generated,
        text,
    }))
}