// ferrum_models/executor/bert_executor.rs

1//! BERT Model Executor for embeddings
2//!
3//! BERT is an encoder model used for generating text embeddings.
4//! Unlike decoder models (LLaMA, Qwen), it doesn't generate tokens.
5
6use std::sync::Arc;
7
8use async_trait::async_trait;
9use candle_core::{DType, Device as CandleDevice, Tensor};
10use candle_nn::VarBuilder;
11use ferrum_interfaces::{
12    model_executor::{
13        AttentionType, DecodeInput, DecodeOutput, ExecutorCapabilities, ExecutorMemoryUsage,
14        ExecutorState, ExecutorStatus, MemoryRequirements, PrefillInput, PrefillOutput,
15    },
16    BlockTable, CacheHandleStats, KvCacheHandle, ModelExecutor, TensorRef,
17};
18use ferrum_types::{DataType, Device, FerrumError, ModelInfo, Result};
19use tracing::{debug, info};
20
21use crate::architectures::bert::BertModelWrapper;
22use crate::tensor_wrapper::CandleTensorWrapper;
23
/// BERT Executor for embedding tasks.
///
/// Wraps a [`BertModelWrapper`] and exposes it through the [`ModelExecutor`]
/// interface. BERT is encoder-only: `prefill` yields sentence embeddings and
/// `decode` is unsupported.
pub struct BertModelExecutor {
    // Underlying BERT model (weights + forward pass).
    model: BertModelWrapper,
    // Metadata for the loaded model (id, sequence limits, ...).
    info: ModelInfo,
    // Candle device the input tensors are created on.
    device: CandleDevice,
    // Status snapshot returned by `status()`; initialized in `new()` and,
    // as far as this file shows, never updated afterwards.
    status: ExecutorStatus,
}
31
32impl BertModelExecutor {
33    /// Create a new BERT executor
34    pub fn new(model: BertModelWrapper, model_info: ModelInfo, device: CandleDevice) -> Self {
35        info!(
36            "Created BertModelExecutor for model: {}",
37            model_info.model_id
38        );
39
40        let status = ExecutorStatus {
41            state: ExecutorState::Ready,
42            is_ready: true,
43            current_batch_size: 0,
44            prefill_operations: 0,
45            decode_operations: 0,
46            avg_prefill_time_ms: 0.0,
47            avg_decode_time_ms: 0.0,
48            memory_usage: ExecutorMemoryUsage {
49                allocated_bytes: 0,
50                used_bytes: 0,
51                peak_bytes: 0,
52                utilization_percent: 0.0,
53            },
54            last_operation: None,
55        };
56
57        Self {
58            model,
59            info: model_info,
60            device,
61            status,
62        }
63    }
64
65    /// Load BERT executor from path
66    pub async fn from_path(
67        model_path: &str,
68        model_def: &crate::definition::ModelDefinition,
69        device: CandleDevice,
70    ) -> Result<Self> {
71        info!("Loading BERT model from: {}", model_path);
72
73        let path = std::path::Path::new(model_path);
74
75        // Find safetensors file
76        let safetensors_path = if path.join("model.safetensors").exists() {
77            path.join("model.safetensors")
78        } else {
79            // Look for any .safetensors file
80            std::fs::read_dir(path)
81                .map_err(|e| FerrumError::model(format!("Failed to read model dir: {}", e)))?
82                .filter_map(|e| e.ok())
83                .find(|e| {
84                    e.path()
85                        .extension()
86                        .map_or(false, |ext| ext == "safetensors")
87                })
88                .map(|e| e.path())
89                .ok_or_else(|| FerrumError::model("No safetensors file found"))?
90        };
91
92        info!("Loading weights from: {:?}", safetensors_path);
93
94        // Use F32 for BERT (better compatibility)
95        let dtype = DType::F32;
96
97        // Load weights
98        let vb = unsafe {
99            VarBuilder::from_mmaped_safetensors(&[&safetensors_path], dtype, &device)
100                .map_err(|e| FerrumError::model(format!("Failed to load weights: {}", e)))?
101        };
102
103        // Create model from config.json
104        // Note: Some models have "bert." prefix, some don't.
105        // sentence-transformers models typically don't have the prefix.
106        let config_path = path.join("config.json");
107        let model = BertModelWrapper::from_config_json(vb, &config_path, device.clone(), dtype)?;
108
109        // Create model info
110        let model_info = model_def.to_model_info(model_path.to_string());
111
112        Ok(Self::new(model, model_info, device))
113    }
114
115    /// Get embeddings for input tokens
116    pub fn get_embeddings(&self, input_ids: &[u32]) -> Result<Tensor> {
117        let seq_len = input_ids.len();
118
119        // Create input tensor
120        let input_tensor = Tensor::from_vec(
121            input_ids.iter().map(|&x| x as i64).collect::<Vec<_>>(),
122            (1, seq_len),
123            &self.device,
124        )
125        .map_err(|e| FerrumError::model(format!("Failed to create input tensor: {}", e)))?;
126
127        // Create token type ids (all zeros for single sentence)
128        let token_type_ids = Tensor::zeros((1, seq_len), DType::I64, &self.device)
129            .map_err(|e| FerrumError::model(format!("Failed to create token type ids: {}", e)))?;
130
131        // Get sentence embedding
132        self.model
133            .get_sentence_embedding(&input_tensor, &token_type_ids, None)
134    }
135
136    /// Get model reference
137    pub fn model(&self) -> &BertModelWrapper {
138        &self.model
139    }
140}
141
/// Dummy KV cache for BERT (not used but required by interface).
///
/// BERT is encoder-only and performs no autoregressive decoding, so no
/// key/value cache is ever populated; this zero-sized handle exists only to
/// satisfy the `PrefillOutput` contract in `prefill()`.
#[derive(Debug, Clone)]
struct DummyBertCache;
145
146impl KvCacheHandle for DummyBertCache {
147    fn block_table(&self) -> &BlockTable {
148        static EMPTY: std::sync::OnceLock<BlockTable> = std::sync::OnceLock::new();
149        EMPTY.get_or_init(|| BlockTable::new(16))
150    }
151
152    fn block_table_mut(&mut self) -> &mut BlockTable {
153        unimplemented!("BERT does not use KV cache")
154    }
155
156    fn as_any(&self) -> &dyn std::any::Any {
157        self
158    }
159
160    fn device(&self) -> Device {
161        Device::CPU
162    }
163
164    fn num_layers(&self) -> usize {
165        0
166    }
167
168    fn num_heads(&self) -> usize {
169        0
170    }
171
172    fn head_dim(&self) -> usize {
173        0
174    }
175
176    fn key_cache(&self, _layer: usize) -> Result<Option<TensorRef>> {
177        Ok(None)
178    }
179
180    fn value_cache(&self, _layer: usize) -> Result<Option<TensorRef>> {
181        Ok(None)
182    }
183
184    fn clone_handle(&self) -> Result<Arc<dyn KvCacheHandle>> {
185        Ok(Arc::new(self.clone()))
186    }
187
188    fn stats(&self) -> CacheHandleStats {
189        CacheHandleStats {
190            memory_bytes: 0,
191            blocks_allocated: 0,
192            tokens_stored: 0,
193            utilization: 0.0,
194            last_access: std::time::Instant::now(),
195        }
196    }
197
198    fn is_valid(&self) -> bool {
199        true
200    }
201
202    fn cache_id(&self) -> String {
203        "bert_dummy_cache".to_string()
204    }
205}
206
207#[async_trait]
208impl ModelExecutor for BertModelExecutor {
209    fn info(&self) -> &ModelInfo {
210        &self.info
211    }
212
213    /// For BERT, prefill returns the embeddings (not logits)
214    async fn prefill(&self, input: &PrefillInput) -> Result<PrefillOutput> {
215        let token_ids: Vec<u32> = if let Ok(v) = input.input_ids.to_vec_u32() {
216            v
217        } else if let Ok(vf) = input.input_ids.to_vec_f32() {
218            vf.into_iter().map(|x| x as u32).collect()
219        } else {
220            return Err(FerrumError::backend("Unable to extract token ids"));
221        };
222
223        debug!("BERT prefill: {} tokens", token_ids.len());
224
225        let embeddings = self.get_embeddings(&token_ids)?;
226
227        // Wrap as TensorRef
228        let output_tensor: TensorRef = Arc::new(CandleTensorWrapper::new(embeddings));
229        let kv_cache: Arc<dyn KvCacheHandle> = Arc::new(DummyBertCache);
230
231        Ok(PrefillOutput::new(output_tensor, kv_cache))
232    }
233
234    /// BERT doesn't support decode (it's an encoder model)
235    async fn decode(&self, _input: &DecodeInput) -> Result<DecodeOutput> {
236        Err(FerrumError::model(
237            "BERT is an encoder model and does not support token generation. Use prefill() to get embeddings.",
238        ))
239    }
240
241    fn capabilities(&self) -> ExecutorCapabilities {
242        ExecutorCapabilities {
243            max_batch_size: 32,
244            max_sequence_length: self.info.max_sequence_length,
245            attention_mechanisms: vec![AttentionType::MultiHead],
246            supports_dynamic_batching: true,
247            supports_continuous_batching: false,
248            supports_speculative_decoding: false,
249            supports_tensor_parallelism: false,
250            supports_pipeline_parallelism: false,
251            supported_dtypes: vec![DataType::FP32],
252            supported_devices: vec![Device::CPU],
253            memory_requirements: MemoryRequirements {
254                parameter_memory: 0,
255                activation_memory_per_token: 0,
256                kv_cache_memory_per_token: 0,
257                overhead_memory: 0,
258            },
259        }
260    }
261
262    fn status(&self) -> ExecutorStatus {
263        self.status.clone()
264    }
265}