/// Object-safe version of `InferenceBackend` for dynamic dispatch.
///
/// Use this when you need runtime polymorphism
/// (e.g., `Box<dyn DynInferenceBackend>`).
///
/// Note: this trait takes owned `String` instead of `&str` for prompts
/// to enable object-safe async methods (boxed futures).
pub trait DynInferenceBackend: Send + Sync {
    /// Load a model from disk (boxed future for object safety).
    fn load_dyn(
        &mut self,
        model_path: PathBuf,
        config: LoadConfig,
    ) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>;

    /// Unload the model from memory (boxed future for object safety).
    fn unload_dyn(
        &mut self,
    ) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>;

    /// Check if a model is currently loaded.
    fn is_loaded_dyn(&self) -> bool;

    /// Get metadata about the loaded model (cloned for object safety).
    fn model_info_dyn(&self) -> Option<ModelInfo>;

    /// Generate a response (boxed future for object safety).
    ///
    /// Takes an owned `String` instead of `&str` for object safety.
    fn infer_dyn(
        &self,
        prompt: String,
        options: ChatOptions,
    ) -> Pin<Box<dyn Future<Output = Result<ChatResponse, NativeError>> + Send + '_>>;

    /// Generate a streaming response (boxed stream for object safety).
    ///
    /// Takes an owned `String` instead of `&str` for object safety.
    fn infer_stream_dyn(
        &self,
        prompt: String,
        options: ChatOptions,
    ) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<String, NativeError>> + Send + 'static>>, NativeError>> + Send + '_>>;
}
Expand description
Object-safe version of InferenceBackend for dynamic dispatch.
Use this when you need runtime polymorphism (e.g., Box<dyn DynInferenceBackend>).
Note: This trait takes owned String instead of &str for prompts
to enable object-safe async methods.
Required Methods
fn load_dyn(
&mut self,
model_path: PathBuf,
config: LoadConfig,
) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>
fn load_dyn( &mut self, model_path: PathBuf, config: LoadConfig, ) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>
Load a model from disk (boxed future for object safety).
fn unload_dyn(
&mut self,
) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>
fn unload_dyn( &mut self, ) -> Pin<Box<dyn Future<Output = Result<(), NativeError>> + Send + '_>>
Unload the model from memory (boxed future for object safety).
fn is_loaded_dyn(&self) -> bool
fn is_loaded_dyn(&self) -> bool
Check if a model is currently loaded.
fn model_info_dyn(&self) -> Option<ModelInfo>
fn model_info_dyn(&self) -> Option<ModelInfo>
Get metadata about the loaded model (cloned for object safety).
fn infer_dyn(
&self,
prompt: String,
options: ChatOptions,
) -> Pin<Box<dyn Future<Output = Result<ChatResponse, NativeError>> + Send + '_>>
fn infer_dyn( &self, prompt: String, options: ChatOptions, ) -> Pin<Box<dyn Future<Output = Result<ChatResponse, NativeError>> + Send + '_>>
Generate a response (boxed future for object safety).
Takes owned String instead of &str for object safety.
fn infer_stream_dyn(
&self,
prompt: String,
options: ChatOptions,
) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<String, NativeError>> + Send + 'static>>, NativeError>> + Send + '_>>
fn infer_stream_dyn( &self, prompt: String, options: ChatOptions, ) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<String, NativeError>> + Send + 'static>>, NativeError>> + Send + '_>>
Generate a streaming response (boxed stream for object safety).
Takes owned String instead of &str for object safety.
Implementors
impl<T: InferenceBackend + 'static> DynInferenceBackend for T
Blanket implementation of DynInferenceBackend for any InferenceBackend.