Struct candle_transformers::models::segment_anything::sam::Sam
pub struct Sam { /* private fields */ }
Implementations§
source§impl Sam
impl Sam
pub fn new( encoder_embed_dim: usize, encoder_depth: usize, encoder_num_heads: usize, encoder_global_attn_indexes: &[usize], vb: VarBuilder<'_> ) -> Result<Self>
pub fn new_tiny(vb: VarBuilder<'_>) -> Result<Self>
pub fn embeddings(&self, img: &Tensor) -> Result<Tensor>
pub fn forward( &self, img: &Tensor, points: &[(f64, f64, bool)], multimask_output: bool ) -> Result<(Tensor, Tensor)>
pub fn forward_for_embeddings(
&self,
img_embeddings: &Tensor,
original_h: usize,
original_w: usize,
points: &[(f64, f64, bool)],
multimask_output: bool
) -> Result<(Tensor, Tensor)>
pub fn forward_for_embeddings( &self, img_embeddings: &Tensor, original_h: usize, original_w: usize, points: &[(f64, f64, bool)], multimask_output: bool ) -> Result<(Tensor, Tensor)>
Generate the mask and IOU predictions from some image embeddings and prompt.
The prompt is specified as a list of points (x, y, b). x and y are the point
coordinates (between 0 and 1) and b is true for points that should be part of the mask
and false for points that should be part of the background and so excluded from the mask.
pub fn unpreprocess(&self, img: &Tensor) -> Result<Tensor>
pub fn preprocess(&self, img: &Tensor) -> Result<Tensor>
pub fn generate_masks( &self, img: &Tensor, points_per_side: usize, crop_n_layer: usize, crop_overlap_ratio: f64, crop_n_points_downscale_factor: usize ) -> Result<Vec<Bbox<Tensor>>>
Trait Implementations§
Auto Trait Implementations§
impl !RefUnwindSafe for Sam
impl Send for Sam
impl Sync for Sam
impl Unpin for Sam
impl !UnwindSafe for Sam
Blanket Implementations§
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more