pub struct InferenceSession { /* private fields */ }
Implementations§
Source§impl InferenceSession
impl InferenceSession
pub fn new(model_type: String) -> Result<InferenceSession, JsValue>
Source§pub async fn initialize_with_auto_device(&mut self) -> Result<(), JsValue>
pub async fn initialize_with_auto_device(&mut self) -> Result<(), JsValue>
Initialize with automatic device selection
pub async fn load_model(&mut self, model_data: &[u8]) -> Result<(), JsValue>
pub fn predict(&mut self, input: &WasmTensor) -> Result<WasmTensor, JsValue>
Source§pub fn enable_debug_logging(&mut self, config: DebugConfig)
pub fn enable_debug_logging(&mut self, config: DebugConfig)
Initialize debug logging
Source§pub fn disable_debug_logging(&mut self)
pub fn disable_debug_logging(&mut self)
Disable debug logging
Source§pub fn start_timer(&mut self, operation: &str)
pub fn start_timer(&mut self, operation: &str)
Start a performance timer
Source§pub fn get_performance_summary(&self) -> Option<String>
pub fn get_performance_summary(&self) -> Option<String>
Get debug performance summary
Source§pub fn export_debug_logs(&self) -> Option<String>
pub fn export_debug_logs(&self) -> Option<String>
Export debug logs
Source§pub fn clear_debug_logs(&mut self)
pub fn clear_debug_logs(&mut self)
Clear debug logs
Source§pub fn log_memory_usage(&mut self, context: &str)
pub fn log_memory_usage(&mut self, context: &str)
Log memory usage with context
Source§pub fn enable_quantization(&mut self, config: QuantizationConfig)
pub fn enable_quantization(&mut self, config: QuantizationConfig)
Initialize quantization with configuration
Source§pub fn disable_quantization(&mut self)
pub fn disable_quantization(&mut self)
Disable quantization
Source§pub async fn load_model_with_quantization(
&mut self,
model_data: &[u8],
) -> Result<(), JsValue>
pub async fn load_model_with_quantization( &mut self, model_data: &[u8], ) -> Result<(), JsValue>
Load model with automatic quantization
Source§pub fn get_quantization_recommendations(
&self,
model_size_bytes: usize,
) -> Option<QuantizationConfig>
pub fn get_quantization_recommendations( &self, model_size_bytes: usize, ) -> Option<QuantizationConfig>
Get quantization recommendations for current model
Source§pub fn should_quantize_model(&self, model_size_bytes: usize) -> bool
pub fn should_quantize_model(&self, model_size_bytes: usize) -> bool
Check if quantization would be beneficial for a given model size
Source§pub fn enable_batch_processing(&mut self, config: BatchConfig)
pub fn enable_batch_processing(&mut self, config: BatchConfig)
Initialize batch processing with configuration
Source§pub fn disable_batch_processing(&mut self)
pub fn disable_batch_processing(&mut self)
Disable batch processing
Source§pub fn add_batch_request(
&mut self,
input: &WasmTensor,
priority: Priority,
timeout_ms: Option<u32>,
) -> Option<String>
pub fn add_batch_request( &mut self, input: &WasmTensor, priority: Priority, timeout_ms: Option<u32>, ) -> Option<String>
Add a request to the batch queue
Source§pub async fn process_batch(&mut self) -> Result<Vec<BatchResponse>, JsValue>
pub async fn process_batch(&mut self) -> Result<Vec<BatchResponse>, JsValue>
Process pending batch requests
Source§pub fn is_batch_ready(&self) -> bool
pub fn is_batch_ready(&self) -> bool
Check if a batch is ready for processing
Source§pub fn get_batch_queue_length(&self) -> usize
pub fn get_batch_queue_length(&self) -> usize
Get current batch queue length
Source§pub fn get_batch_stats(&self) -> Option<String>
pub fn get_batch_stats(&self) -> Option<String>
Get batch processing statistics
Source§pub fn clear_batch_queue(&mut self)
pub fn clear_batch_queue(&mut self)
Clear the batch queue
Source§pub fn enable_events(&mut self)
pub fn enable_events(&mut self)
Enable event system
Source§pub fn disable_events(&mut self)
pub fn disable_events(&mut self)
Disable event system
Source§pub fn get_event_history(&self) -> Option<String>
pub fn get_event_history(&self) -> Option<String>
Get event history as JSON
Source§pub fn clear_event_history(&mut self)
pub fn clear_event_history(&mut self)
Clear event history
Source§pub fn emit_custom_event(
&mut self,
event_type: u32,
source: &str,
data: Option<String>,
)
pub fn emit_custom_event( &mut self, event_type: u32, source: &str, data: Option<String>, )
Emit a custom event
Source§pub async fn predict_with_batching(
&mut self,
input: &WasmTensor,
priority: Priority,
) -> Result<WasmTensor, JsValue>
pub async fn predict_with_batching( &mut self, input: &WasmTensor, priority: Priority, ) -> Result<WasmTensor, JsValue>
Single inference with automatic batching support
Source§pub async fn flush_batches(&mut self) -> Result<Vec<BatchResponse>, JsValue>
pub async fn flush_batches(&mut self) -> Result<Vec<BatchResponse>, JsValue>
Process all pending batches
Trait Implementations§
Source§impl From<InferenceSession> for JsValue
impl From<InferenceSession> for JsValue
Source§fn from(value: InferenceSession) -> Self
fn from(value: InferenceSession) -> Self
Source§impl FromWasmAbi for InferenceSession
impl FromWasmAbi for InferenceSession
Source§impl IntoWasmAbi for InferenceSession
impl IntoWasmAbi for InferenceSession
Source§impl OptionFromWasmAbi for InferenceSession
impl OptionFromWasmAbi for InferenceSession
Source§impl OptionIntoWasmAbi for InferenceSession
impl OptionIntoWasmAbi for InferenceSession
Source§impl RefFromWasmAbi for InferenceSession
impl RefFromWasmAbi for InferenceSession
Source§type Anchor = RcRef<InferenceSession>
type Anchor = RcRef<InferenceSession>
The type that holds the reference to Self for the duration of the
invocation of the function that has an &Self parameter. This is
required to ensure that the lifetimes don’t persist beyond one function
call, and so that they remain anonymous.
Source§impl RefMutFromWasmAbi for InferenceSession
impl RefMutFromWasmAbi for InferenceSession
Source§impl TryFromJsValue for InferenceSession
impl TryFromJsValue for InferenceSession
Source§impl VectorFromWasmAbi for InferenceSession
impl VectorFromWasmAbi for InferenceSession
type Abi = <Box<[JsValue]> as FromWasmAbi>::Abi
unsafe fn vector_from_abi(js: Self::Abi) -> Box<[InferenceSession]>
Source§impl VectorIntoWasmAbi for InferenceSession
impl VectorIntoWasmAbi for InferenceSession
type Abi = <Box<[JsValue]> as IntoWasmAbi>::Abi
fn vector_into_abi(vector: Box<[InferenceSession]>) -> Self::Abi
Source§impl WasmDescribeVector for InferenceSession
impl WasmDescribeVector for InferenceSession
impl SupportsConstructor for InferenceSession
impl SupportsInstanceProperty for InferenceSession
impl SupportsStaticProperty for InferenceSession
Auto Trait Implementations§
impl Freeze for InferenceSession
impl !RefUnwindSafe for InferenceSession
impl !Send for InferenceSession
impl !Sync for InferenceSession
impl Unpin for InferenceSession
impl UnsafeUnpin for InferenceSession
impl !UnwindSafe for InferenceSession
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> ReturnWasmAbi for T
where
    T: IntoWasmAbi,
impl<T> ReturnWasmAbi for T
where
    T: IntoWasmAbi,
Source§type Abi = <T as IntoWasmAbi>::Abi
type Abi = <T as IntoWasmAbi>::Abi
Same as IntoWasmAbi::Abi.
Source§fn return_abi(self) -> <T as ReturnWasmAbi>::Abi
fn return_abi(self) -> <T as ReturnWasmAbi>::Abi
Same as IntoWasmAbi::into_abi, except that it may throw and never
return in the case of Err.