Trait TlStreamingExecutor

pub trait TlStreamingExecutor {
    type Tensor;
    type Error;

    // Required methods
    fn execute_stream(
        &mut self,
        graph: &EinsumGraph,
        input_stream: Vec<Vec<Vec<Self::Tensor>>>,
        config: &StreamingConfig,
    ) -> Result<Vec<StreamResult<Self::Tensor>>, Self::Error>;
    fn execute_chunk(
        &mut self,
        graph: &EinsumGraph,
        chunk_inputs: Vec<Self::Tensor>,
        metadata: &ChunkMetadata,
    ) -> Result<StreamResult<Self::Tensor>, Self::Error>;

    // Provided methods
    fn recommend_chunk_size(
        &self,
        graph: &EinsumGraph,
        available_memory_mb: usize,
    ) -> usize { ... }
    fn estimate_chunk_memory(
        &self,
        graph: &EinsumGraph,
        chunk_size: usize,
    ) -> usize { ... }
}

Trait for executors that support streaming execution
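
A minimal usage sketch: the generic driver below calls only the trait methods shown on this page. The helper name run_streaming and its parameter list are illustrative, and the EinsumGraph, StreamingConfig, and StreamResult values are assumed to be constructed elsewhere in the crate.

// Hypothetical generic driver over any TlStreamingExecutor.
fn run_streaming<E: TlStreamingExecutor>(
    executor: &mut E,
    graph: &EinsumGraph,
    input_stream: Vec<Vec<Vec<E::Tensor>>>,
    config: &StreamingConfig,
    available_memory_mb: usize,
) -> Result<Vec<StreamResult<E::Tensor>>, E::Error> {
    // Query the executor's recommended chunk size for the memory budget;
    // it is only reported here, since `input_stream` is assumed pre-chunked.
    let chunk_size = executor.recommend_chunk_size(graph, available_memory_mb);
    println!("recommended chunk size: {chunk_size}");

    // Execute the whole stream; one StreamResult is expected per chunk.
    executor.execute_stream(graph, input_stream, config)
}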

Required Associated Types§

type Tensor

type Error

Required Methods§

fn execute_stream(
    &mut self,
    graph: &EinsumGraph,
    input_stream: Vec<Vec<Vec<Self::Tensor>>>,
    config: &StreamingConfig,
) -> Result<Vec<StreamResult<Self::Tensor>>, Self::Error>

Execute graph on a stream of input chunks

fn execute_chunk(
    &mut self,
    graph: &EinsumGraph,
    chunk_inputs: Vec<Self::Tensor>,
    metadata: &ChunkMetadata,
) -> Result<StreamResult<Self::Tensor>, Self::Error>

Execute graph on a single chunk with metadata
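
A hedged example of driving execution one chunk at a time: the helper below is hypothetical and assumes the caller already holds each chunk's inputs together with its ChunkMetadata.

// Hypothetical helper: execute a sequence of chunks in order, returning
// the first error encountered.
fn run_chunks<E: TlStreamingExecutor>(
    executor: &mut E,
    graph: &EinsumGraph,
    chunks: Vec<(Vec<E::Tensor>, ChunkMetadata)>,
) -> Result<Vec<StreamResult<E::Tensor>>, E::Error> {
    let mut results = Vec::with_capacity(chunks.len());
    for (chunk_inputs, metadata) in chunks {
        // Each chunk is executed independently; results keep stream order.
        results.push(executor.execute_chunk(graph, chunk_inputs, &metadata)?);
    }
    Ok(results)
}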

Provided Methods§

fn recommend_chunk_size(&self, graph: &EinsumGraph, available_memory_mb: usize) -> usize

Get recommended chunk size based on available memory

fn estimate_chunk_memory(&self, graph: &EinsumGraph, chunk_size: usize) -> usize

Estimate memory usage per chunk
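
The two provided methods can be cross-checked against each other. The sketch below is illustrative only and assumes estimate_chunk_memory reports bytes, which the trait documentation does not specify.

// Hypothetical sanity check: does the recommended chunk size fit the budget?
fn recommendation_fits<E: TlStreamingExecutor>(
    executor: &E,
    graph: &EinsumGraph,
    available_memory_mb: usize,
) -> bool {
    let chunk_size = executor.recommend_chunk_size(graph, available_memory_mb);
    // Assumption: the estimate is in bytes; convert the MB budget to compare.
    executor.estimate_chunk_memory(graph, chunk_size) <= available_memory_mb * 1024 * 1024
}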

Implementors§