// ferrum_interfaces/engine.rs
1//! Inference engine interface with streaming and batch support
2//!
3//! This module provides the top-level inference engine interface that
4//! orchestrates all other components: tokenizer, model executor, scheduler,
5//! and sampler.
6
7use async_trait::async_trait;
8use ferrum_types::{EngineConfig, InferenceRequest, InferenceResponse, Result, StreamChunk};
9use futures::Stream;
10use std::pin::Pin;
11
12/// Core inference engine trait
13#[async_trait]
14pub trait InferenceEngine: Send + Sync {
15    /// Execute single inference request
16    async fn infer(&self, request: InferenceRequest) -> Result<InferenceResponse>;
17
18    /// Execute streaming inference request
19    async fn infer_stream(
20        &self,
21        request: InferenceRequest,
22    ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamChunk>> + Send>>>;
23
24    /// Get current engine status
25    async fn status(&self) -> ferrum_types::EngineStatus;
26
27    /// Shutdown engine gracefully
28    async fn shutdown(&self) -> Result<()>;
29
30    /// Get engine configuration
31    fn config(&self) -> &EngineConfig;
32
33    /// Get engine metrics
34    fn metrics(&self) -> ferrum_types::EngineMetrics;
35
36    /// Health check
37    async fn health_check(&self) -> ferrum_types::HealthStatus;
38
39    /// Embed raw text string → float vector (engine handles tokenization).
40    async fn embed_text(&self, _text: &str) -> Result<Vec<f32>> {
41        Err(ferrum_types::FerrumError::model(
42            "This engine does not support text embedding",
43        ))
44    }
45
46    /// Embed image (file path or base64) → float vector. Default: not supported.
47    async fn embed_image(&self, _image: &str) -> Result<Vec<f32>> {
48        Err(ferrum_types::FerrumError::model(
49            "This engine does not support image embedding",
50        ))
51    }
52
53    /// Get embedding dimension. Default: 0 (not an embedding model).
54    fn embedding_dim(&self) -> usize {
55        0
56    }
57}
58
59/// Advanced engine capabilities
60#[async_trait]
61pub trait AdvancedInferenceEngine: InferenceEngine {
62    /// Execute batch inference
63    async fn infer_batch(
64        &self,
65        requests: Vec<InferenceRequest>,
66    ) -> Result<Vec<Result<InferenceResponse>>>;
67
68    /// Execute speculative inference
69    async fn infer_speculative(
70        &self,
71        request: InferenceRequest,
72        speculation_config: ferrum_types::SpeculationConfig,
73    ) -> Result<InferenceResponse>;
74
75    /// Warm up engine with sample requests
76    async fn warmup(
77        &mut self,
78        warmup_requests: Vec<InferenceRequest>,
79    ) -> Result<ferrum_types::WarmupResult>;
80
81    /// Configure engine at runtime
82    async fn reconfigure(&mut self, config: EngineConfig) -> Result<()>;
83
84    /// Get detailed diagnostics
85    async fn diagnostics(&self) -> ferrum_types::DiagnosticsReport;
86
87    /// Export engine state for debugging
88    async fn export_state(&self) -> Result<ferrum_types::EngineState>;
89
90    /// Import engine state for debugging/testing
91    async fn import_state(&mut self, state: ferrum_types::EngineState) -> Result<()>;
92}
93
94/// Speculation configuration for speculative decoding
95pub type SpeculationConfig = ferrum_types::SpeculationConfig;
96
97/// Hardware constraints alias
98pub type HardwareConstraints = ferrum_types::HardwareConstraints;
99
100/// Request characteristics alias
101pub type RequestCharacteristics = ferrum_types::RequestCharacteristics;
102
103/// Latency requirements alias
104pub type LatencyRequirements = ferrum_types::LatencyRequirements;