// ferrum_interfaces/model_executor.rs
1//! Model execution interface with clear prefill/decode separation
2//!
3//! This module provides the ModelExecutor trait that replaces the "fat" Model
4//! interface, focusing purely on tensor operations without tokenization or sampling.
5
6use crate::{KvCacheHandle, TensorRef};
7use async_trait::async_trait;
8use ferrum_types::{ModelInfo, Result};
9use serde::{Deserialize, Serialize};
10use std::{collections::HashMap, sync::Arc};
11
/// Input for the prefill phase (processing the initial prompt in one forward pass).
#[derive(Debug, Clone)]
pub struct PrefillInput {
    /// Input token IDs [batch_size, sequence_length]
    pub input_ids: TensorRef,
    /// Attention mask [batch_size, sequence_length] (optional)
    pub attention_mask: Option<TensorRef>,
    /// Position IDs [batch_size, sequence_length] (optional, e.g. for RoPE)
    pub position_ids: Option<TensorRef>,
    /// Pre-allocated KV cache handle (optional, for paged attention backends)
    pub kv_cache: Option<Arc<dyn KvCacheHandle>>,
}
24
25impl PrefillInput {
26    /// Create new prefill input
27    pub fn new(input_ids: TensorRef) -> Self {
28        Self {
29            input_ids,
30            attention_mask: None,
31            position_ids: None,
32            kv_cache: None,
33        }
34    }
35
36    /// Create prefill input with a pre-allocated KV cache handle.
37    pub fn with_kv_cache(mut self, kv_cache: Arc<dyn KvCacheHandle>) -> Self {
38        self.kv_cache = Some(kv_cache);
39        self
40    }
41
42    /// Add attention mask
43    pub fn with_attention_mask(mut self, mask: TensorRef) -> Self {
44        self.attention_mask = Some(mask);
45        self
46    }
47
48    /// Add position IDs
49    pub fn with_position_ids(mut self, positions: TensorRef) -> Self {
50        self.position_ids = Some(positions);
51        self
52    }
53
54    /// Get batch size
55    pub fn batch_size(&self) -> usize {
56        self.input_ids.shape()[0]
57    }
58
59    /// Get sequence length
60    pub fn sequence_length(&self) -> usize {
61        if self.input_ids.shape().len() >= 2 {
62            self.input_ids.shape()[1]
63        } else {
64            1
65        }
66    }
67}
68
/// Output from the prefill phase.
#[derive(Debug, Clone)]
pub struct PrefillOutput {
    /// Logits for all prompt positions [batch_size, sequence_length, vocab_size]
    pub logits: TensorRef,
    /// KV cache handle populated with the prompt's key/value states
    pub kv_cache: Arc<dyn KvCacheHandle>,
    /// Hidden states at each layer (optional, for analysis/debugging)
    pub hidden_states: Option<Vec<TensorRef>>,
    /// Attention weights (optional, for analysis/debugging)
    pub attention_weights: Option<Vec<TensorRef>>,
}
81
impl PrefillOutput {
    /// Create a prefill output with only the required fields populated;
    /// optional analysis outputs start as `None`.
    pub fn new(logits: TensorRef, kv_cache: Arc<dyn KvCacheHandle>) -> Self {
        Self {
            logits,
            kv_cache,
            hidden_states: None,
            attention_weights: None,
        }
    }

    /// Get the logits at the last prompt position (used to sample the first
    /// generated token).
    ///
    /// # Errors
    /// Returns a backend error if `logits` is not 3-D [batch, seq, vocab]
    /// or if the sequence dimension is empty.
    pub fn last_token_logits(&self) -> Result<TensorRef> {
        let shape = self.logits.shape();
        if shape.len() != 3 {
            return Err(ferrum_types::FerrumError::backend(
                "Expected 3D logits tensor [batch, seq, vocab]",
            ));
        }

        let seq_len = shape[1];
        if seq_len == 0 {
            return Err(ferrum_types::FerrumError::backend("Empty sequence"));
        }

        // Slice out the final position along the sequence axis.
        // NOTE(review): this assumes `view(start, end)` semantics (exclusive end
        // indices), yielding a [batch, 1, vocab] view. If `TensorRef::view`
        // instead takes (start, shape), the second argument should be
        // [batch, 1, vocab] rather than [batch, seq_len, vocab] — confirm
        // against the TensorRef API.
        self.logits
            .view(&[0, seq_len - 1, 0], &[shape[0], seq_len, shape[2]])
    }
}
112
/// Input for the decode phase (generating one token at a time).
#[derive(Debug, Clone)]
pub struct DecodeInput {
    /// Input token ID for the current step [batch_size, 1]
    pub input_ids: TensorRef,
    /// Existing KV cache carried over from prefill / previous decode steps
    pub kv_cache: Arc<dyn KvCacheHandle>,
    /// Position IDs for the current step [batch_size, 1] (optional)
    pub position_ids: Option<TensorRef>,
}
123
124impl DecodeInput {
125    /// Create new decode input
126    pub fn new(input_ids: TensorRef, kv_cache: Arc<dyn KvCacheHandle>) -> Self {
127        Self {
128            input_ids,
129            kv_cache,
130            position_ids: None,
131        }
132    }
133
134    /// Add position IDs
135    pub fn with_position_ids(mut self, positions: TensorRef) -> Self {
136        self.position_ids = Some(positions);
137        self
138    }
139
140    /// Get batch size
141    pub fn batch_size(&self) -> usize {
142        self.input_ids.shape()[0]
143    }
144}
145
/// Output from the decode phase.
#[derive(Debug, Clone)]
pub struct DecodeOutput {
    /// Logits for the next token [batch_size, vocab_size]
    pub logits: TensorRef,
    /// Updated KV cache including the newly generated token's state
    pub kv_cache: Arc<dyn KvCacheHandle>,
    /// Hidden state for the current token (optional, for analysis)
    pub hidden_state: Option<TensorRef>,
    /// Attention weights for the current token (optional, for analysis)
    pub attention_weights: Option<Vec<TensorRef>>,
}
158
159impl DecodeOutput {
160    /// Create new decode output
161    pub fn new(logits: TensorRef, kv_cache: Arc<dyn KvCacheHandle>) -> Self {
162        Self {
163            logits,
164            kv_cache,
165            hidden_state: None,
166            attention_weights: None,
167        }
168    }
169}
170
/// Core model executor trait focusing purely on tensor operations.
///
/// Tokenization and sampling live elsewhere; implementations turn input token
/// tensors into logits and manage KV cache state across prefill/decode.
#[async_trait]
pub trait ModelExecutor: Send + Sync {
    /// Get model information and metadata
    fn info(&self) -> &ModelInfo;

    /// Execute the prefill phase (process the initial prompt in one pass)
    async fn prefill(&self, input: &PrefillInput) -> Result<PrefillOutput>;

    /// Execute the decode phase (produce logits for the next token)
    async fn decode(&self, input: &DecodeInput) -> Result<DecodeOutput>;

    /// Batch decode: process multiple sequences in one forward pass.
    ///
    /// Default implementation falls back to sequential per-request `decode()`
    /// — no cross-request batching. Executors with batched CUDA runners
    /// should override this. The first failing request aborts the whole batch.
    async fn batch_decode(&self, inputs: &[DecodeInput]) -> Result<Vec<DecodeOutput>> {
        let mut outputs = Vec::with_capacity(inputs.len());
        for input in inputs {
            outputs.push(self.decode(input).await?);
        }
        Ok(outputs)
    }

    /// Optional: full forward pass (for non-autoregressive use cases)
    async fn forward(&self, _input: &TensorRef) -> Result<TensorRef> {
        // Default: not supported — override in executors that can run a
        // single non-incremental forward pass.
        Err(ferrum_types::FerrumError::unsupported(
            "Full forward pass not supported by this executor",
        ))
    }

    /// Get executor capabilities
    fn capabilities(&self) -> ExecutorCapabilities;

    /// Get current executor status
    fn status(&self) -> ExecutorStatus;

    /// Warm up executor (load model, allocate memory, etc.)
    async fn warmup(&mut self) -> Result<()> {
        // Default no-op implementation
        Ok(())
    }

    /// Shutdown executor gracefully
    async fn shutdown(&mut self) -> Result<()> {
        // Default no-op implementation
        Ok(())
    }

    /// Release KV cache and state for a completed sequence.
    ///
    /// Called by the engine when a request finishes (success or error) to free
    /// GPU memory held by the sequence's KV cache. The `cache_id` matches the
    /// value embedded in the `KvCacheHandle` returned by prefill/decode.
    fn release_cache(&self, _cache_id: &str) {
        // Default no-op — executors that manage per-sequence KV caches should override.
    }
}
230
/// Executor capabilities and configuration limits, advertised via
/// `ModelExecutor::capabilities()`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorCapabilities {
    /// Maximum supported batch size
    pub max_batch_size: usize,
    /// Maximum sequence length
    pub max_sequence_length: usize,
    /// Supported attention mechanisms
    pub attention_mechanisms: Vec<AttentionType>,
    /// Whether executor supports dynamic batching
    pub supports_dynamic_batching: bool,
    /// Whether executor supports continuous batching
    pub supports_continuous_batching: bool,
    /// Whether executor supports speculative decoding
    pub supports_speculative_decoding: bool,
    /// Whether executor supports tensor parallelism
    pub supports_tensor_parallelism: bool,
    /// Whether executor supports pipeline parallelism
    pub supports_pipeline_parallelism: bool,
    /// Supported data types
    pub supported_dtypes: Vec<ferrum_types::DataType>,
    /// Supported devices
    pub supported_devices: Vec<ferrum_types::Device>,
    /// Memory requirements estimation
    pub memory_requirements: MemoryRequirements,
}
257
258/// Attention mechanism types
259#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
260pub enum AttentionType {
261    /// Standard multi-head attention
262    MultiHead,
263    /// Multi-query attention (MQA)
264    MultiQuery,
265    /// Grouped-query attention (GQA)
266    GroupedQuery,
267    /// Flash attention
268    Flash,
269    /// Paged attention
270    Paged,
271    /// Sliding window attention
272    SlidingWindow,
273}
274
/// Memory requirements for model execution, all sizes in bytes.
///
/// NOTE(review): `parameter_memory`/`overhead_memory` are `u64` while the
/// per-token fields are `usize` — verify the mixed widths are intentional.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MemoryRequirements {
    /// Model parameter memory in bytes
    pub parameter_memory: u64,
    /// Minimum activation memory per token, in bytes
    pub activation_memory_per_token: usize,
    /// KV cache memory per token per layer, in bytes
    pub kv_cache_memory_per_token: usize,
    /// Additional overhead memory in bytes
    pub overhead_memory: u64,
}
287
288impl MemoryRequirements {
289    /// Calculate total memory for given configuration
290    pub fn calculate_total_memory(
291        &self,
292        batch_size: usize,
293        sequence_length: usize,
294        num_layers: usize,
295    ) -> u64 {
296        let activation_mem =
297            (self.activation_memory_per_token * batch_size * sequence_length) as u64;
298        let kv_cache_mem =
299            (self.kv_cache_memory_per_token * batch_size * sequence_length * num_layers) as u64;
300
301        self.parameter_memory + activation_mem + kv_cache_mem + self.overhead_memory
302    }
303}
304
/// Executor status information reported by `ModelExecutor::status()`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorStatus {
    /// Current executor state
    pub state: ExecutorState,
    /// Whether executor is ready to accept requests
    pub is_ready: bool,
    /// Current batch size being processed
    pub current_batch_size: usize,
    /// Number of prefill operations completed
    pub prefill_operations: u64,
    /// Number of decode operations completed
    pub decode_operations: u64,
    /// Average prefill time in milliseconds
    pub avg_prefill_time_ms: f64,
    /// Average decode time in milliseconds
    pub avg_decode_time_ms: f64,
    /// Memory usage statistics
    pub memory_usage: ExecutorMemoryUsage,
    /// Last operation timestamp. `Instant` is not serializable, so this field
    /// is skipped by serde and deserializes as `None`.
    #[serde(skip)]
    pub last_operation: Option<std::time::Instant>,
}
328
/// Executor lifecycle state.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum ExecutorState {
    /// Executor is initializing
    Initializing,
    /// Executor is ready to accept requests
    Ready,
    /// Executor is processing requests
    Busy,
    /// Executor encountered an error
    Error,
    /// Executor is shutting down
    Shutdown,
}
343
/// Executor memory usage snapshot.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorMemoryUsage {
    /// Total allocated memory in bytes
    pub allocated_bytes: usize,
    /// Currently used memory in bytes
    pub used_bytes: usize,
    /// Peak memory usage in bytes
    pub peak_bytes: usize,
    /// Memory utilization percentage
    // assumes 0-100 scale (it is named "percent") — TODO confirm against producers
    pub utilization_percent: f32,
}
356
/// Batch model executor for processing multiple requests efficiently.
///
/// NOTE(review): `batch_decode` here repeats the name and signature of the
/// default method on [`ModelExecutor`]; with both traits in scope, calls on a
/// `T: BatchModelExecutor` may hit "multiple applicable items" and require
/// fully-qualified syntax — consider renaming one of the two.
#[async_trait]
pub trait BatchModelExecutor: ModelExecutor {
    /// Execute batch prefill for multiple sequences
    async fn batch_prefill(&self, inputs: &[PrefillInput]) -> Result<Vec<PrefillOutput>>;

    /// Execute batch decode for multiple sequences
    async fn batch_decode(&self, inputs: &[DecodeInput]) -> Result<Vec<DecodeOutput>>;

    /// Get optimal batch size for current conditions
    fn optimal_batch_size(&self) -> usize;

    /// Check if batch size is supported
    fn supports_batch_size(&self, batch_size: usize) -> bool;
}
372
/// Speculative execution support.
#[async_trait]
pub trait SpeculativeExecutor: ModelExecutor {
    /// Execute speculative decoding: verify `draft_tokens` (proposed by a
    /// draft model) against this model in one pass.
    // NOTE(review): how `acceptance_threshold` is applied (probability ratio
    // vs. raw logit cutoff) is implementation-defined here — confirm with
    // concrete executors.
    async fn speculative_decode(
        &self,
        input: &DecodeInput,
        draft_tokens: &[ferrum_types::TokenId],
        acceptance_threshold: f32,
    ) -> Result<SpeculativeDecodeOutput>;
}
384
/// Output from speculative decoding.
#[derive(Debug, Clone)]
pub struct SpeculativeDecodeOutput {
    /// Accepted tokens (a prefix subset of the draft tokens)
    pub accepted_tokens: Vec<ferrum_types::TokenId>,
    /// Logits for the next token after the last accepted one
    pub next_logits: TensorRef,
    /// Updated KV cache
    pub kv_cache: Arc<dyn KvCacheHandle>,
    /// Number of draft tokens accepted
    // presumably always equals accepted_tokens.len() — verify with implementors
    pub acceptance_count: usize,
}
397
/// Factory for constructing model executors from configuration.
#[async_trait]
pub trait ModelExecutorFactory: Send + Sync {
    /// Create an executor from model configuration
    async fn create_executor(&self, config: &ExecutorConfig) -> Result<Box<dyn ModelExecutor>>;

    /// Create a batch-capable executor
    async fn create_batch_executor(
        &self,
        config: &ExecutorConfig,
    ) -> Result<Box<dyn BatchModelExecutor>>;

    /// Get the executor types this factory can build
    fn supported_types(&self) -> Vec<ExecutorType>;

    /// Validate a configuration without building an executor
    fn validate_config(&self, config: &ExecutorConfig) -> Result<()>;
}
416
/// Executor configuration consumed by `ModelExecutorFactory`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorConfig {
    /// Model information
    pub model_info: ModelInfo,
    /// Target device
    pub device: ferrum_types::Device,
    /// Data type for computation
    pub dtype: ferrum_types::DataType,
    /// Maximum batch size
    pub max_batch_size: usize,
    /// Maximum sequence length
    pub max_sequence_length: usize,
    /// Runtime attention configuration
    pub attention_config: ExecutorAttentionConfig,
    /// Memory configuration
    pub memory_config: ExecutorMemoryConfig,
    /// Optimization settings
    pub optimization_config: OptimizationConfig,
    /// Additional executor-specific options (free-form JSON values)
    pub executor_options: HashMap<String, serde_json::Value>,
}
439
/// Runtime attention configuration for model executor.
///
/// Note: This is different from ferrum_types::AttentionConfig which describes
/// the model architecture's attention configuration from config.json.
/// This type describes the runtime execution settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorAttentionConfig {
    /// Type of attention to use
    pub attention_type: AttentionType,
    /// Enable flash attention if available
    pub enable_flash_attention: bool,
    /// Enable paged attention
    pub enable_paged_attention: bool,
    /// Block size for paged attention (`None` lets the executor choose)
    pub block_size: Option<usize>,
    /// Sliding window size (if using sliding window attention)
    pub sliding_window_size: Option<usize>,
}
458
/// Memory configuration for an executor.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorMemoryConfig {
    /// Enable memory pooling
    pub enable_memory_pooling: bool,
    /// Memory pool size in bytes (`None` for auto-sizing)
    pub memory_pool_size: Option<usize>,
    /// Enable KV cache sharing
    pub enable_kv_cache_sharing: bool,
    /// Maximum memory usage percentage
    // assumes a 0.0-1.0 fraction or 0-100 percent — units not fixed here; confirm with consumers
    pub max_memory_usage: f32,
}
471
/// Optimization configuration for executor codegen/runtime.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OptimizationConfig {
    /// Enable CUDA graphs (if supported by the backend)
    pub enable_cuda_graphs: bool,
    /// Enable kernel fusion
    pub enable_kernel_fusion: bool,
    /// Enable mixed precision
    pub enable_mixed_precision: bool,
    /// Optimization level (0-3)
    pub optimization_level: u8,
    /// Custom optimization flags keyed by name
    pub custom_flags: HashMap<String, bool>,
}
486
487/// Supported executor types
488#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
489pub enum ExecutorType {
490    /// Standard sequential executor
491    Sequential,
492    /// Batch executor for parallel processing
493    Batch,
494    /// Continuous batching executor
495    ContinuousBatch,
496    /// Speculative decoding executor
497    Speculative,
498    /// Pipeline parallel executor
499    PipelineParallel,
500    /// Tensor parallel executor
501    TensorParallel,
502}
503
/// Executor performance metrics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExecutorMetrics {
    /// Total operations executed
    pub total_operations: u64,
    /// Prefill operations completed
    pub prefill_operations: u64,
    /// Decode operations completed
    pub decode_operations: u64,
    /// Average prefill latency (ms)
    pub avg_prefill_latency: f64,
    /// Average decode latency (ms)
    pub avg_decode_latency: f64,
    /// P95 prefill latency (ms)
    pub p95_prefill_latency: f64,
    /// P95 decode latency (ms)
    pub p95_decode_latency: f64,
    /// Throughput (tokens per second)
    pub throughput_tps: f64,
    /// Memory efficiency (used/allocated ratio)
    pub memory_efficiency: f32,
    /// Batch utilization
    pub batch_utilization: f32,
}
528
/// Registry for managing multiple named executors.
pub trait ExecutorRegistry: Send + Sync {
    /// Register an executor under `name`.
    // NOTE(review): behavior when `name` is already registered (replace vs.
    // error) is left to implementations — confirm and document there.
    fn register(&mut self, name: &str, executor: Box<dyn ModelExecutor>) -> Result<()>;

    /// Get executor by name, or `None` if not registered
    fn get(&self, name: &str) -> Option<&dyn ModelExecutor>;

    /// Remove executor by name, returning it if it was registered
    fn remove(&mut self, name: &str) -> Option<Box<dyn ModelExecutor>>;

    /// List registered executor names
    fn list_names(&self) -> Vec<String>;

    /// Get metrics for a named executor, if available
    fn get_metrics(&self, name: &str) -> Option<ExecutorMetrics>;
}