pub struct AdaptiveMaxPool2D<F: Float + Debug + Send + Sync> { /* private fields */ }
Adaptive Max Pooling 2D layer
Applies max pooling with an adaptive output size: the desired output spatial size is specified, and the pooling kernel size and stride are computed automatically from the input size.
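How the pooling windows are derived is internal to the crate; the following is a minimal sketch of the scheme commonly used for adaptive pooling (PyTorch uses this one, for example), where each output index maps to an input window whose start rounds down and whose end rounds up. The helper name adaptive_window is illustrative, not part of this crate's API.

// Hypothetical helper illustrating how adaptive pooling typically maps
// an output index to an input window along one axis; not this crate's code.
fn adaptive_window(out_idx: usize, in_size: usize, out_size: usize) -> (usize, usize) {
    let start = (out_idx * in_size) / out_size;                    // floor
    let end = ((out_idx + 1) * in_size + out_size - 1) / out_size; // ceil
    (start, end) // half-open window [start, end)
}

// Pooling a 32-pixel axis down to 7 outputs: the first window is
// [0, 5) and the last is [27, 32); windows overlap when 32 % 7 != 0.
assert_eq!(adaptive_window(0, 32, 7), (0, 5));
assert_eq!(adaptive_window(6, 32, 7), (27, 32));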
§Examples
use scirs2_neural::layers::{AdaptiveMaxPool2D, Layer};
use ndarray::{Array, Array4};
// Create an adaptive max pooling layer with output size 7x7
let pool = AdaptiveMaxPool2D::new((7, 7), Some("adaptive_max_pool")).unwrap();
// Forward pass with a batch of 2 samples, each with 3 channels and size 32x32
let batch_size = 2;
let channels = 3;
let height = 32;
let width = 32;
let input = Array4::<f64>::from_elem((batch_size, channels, height, width), 0.1).into_dyn();
let output = pool.forward(&input).unwrap();
// Output should have dimensions [batch_size, channels, 7, 7]
assert_eq!(output.shape(), &[batch_size, channels, 7, 7]);
Implementations§
impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> AdaptiveMaxPool2D<F>
pub fn new(output_size: (usize, usize), name: Option<&str>) -> Result<Self>
Create a new adaptive max pooling layer
§Arguments
output_size - Desired output spatial size (height, width)
name - Optional name for the layer
§Returns
- A new adaptive max pooling layer
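A minimal construction sketch based on the signature above (the element type f32 and the layer name are arbitrary choices; the ? operator assumes a surrounding function that returns Result):

use scirs2_neural::layers::AdaptiveMaxPool2D;
// Pool any spatial input down to 4x4; the name is optional.
let pool = AdaptiveMaxPool2D::<f32>::new((4, 4), Some("pool_4x4"))?;
let unnamed = AdaptiveMaxPool2D::<f32>::new((4, 4), None)?;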
Examples found in repository
examples/new_features_showcase.rs (line 53)
33 fn demonstrate_adaptive_pooling() -> Result<(), Box<dyn std::error::Error>> {
34 println!("🔧 Adaptive Pooling Layers Demonstration");
35 println!("========================================\n");
36
37 // Create input tensor: batch_size=2, channels=3, height=32, width=32
38 let input = Array4::<f64>::from_elem((2, 3, 32, 32), 1.5);
39 println!("Input shape: {:?}", input.shape());
40
41 // Adaptive Average Pooling to 7x7
42 println!("\n1. Adaptive Average Pooling (32x32 → 7x7):");
43 let adaptive_avg_pool = AdaptiveAvgPool2D::new((7, 7), Some("adaptive_avg_7x7"))?;
44 let avg_output = adaptive_avg_pool.forward(&input.clone().into_dyn())?;
45 println!(" Output shape: {:?}", avg_output.shape());
46 println!(
47 " Layer description: {}",
48 adaptive_avg_pool.layer_description()
49 );
50
51 // Adaptive Max Pooling to 4x4
52 println!("\n2. Adaptive Max Pooling (32x32 → 4x4):");
53 let adaptive_max_pool = AdaptiveMaxPool2D::new((4, 4), Some("adaptive_max_4x4"))?;
54 let max_output = adaptive_max_pool.forward(&input.into_dyn())?;
55 println!(" Output shape: {:?}", max_output.shape());
56 println!(
57 " Layer description: {}",
58 adaptive_max_pool.layer_description()
59 );
60
61 // Non-square adaptive pooling
62 println!("\n3. Non-square Adaptive Pooling (32x32 → 3x5):");
63 let non_square_pool = AdaptiveAvgPool2D::new((3, 5), Some("non_square"))?;
64 let non_square_output =
65 non_square_pool.forward(&Array4::<f64>::from_elem((1, 2, 16, 20), 2.0).into_dyn())?;
66 println!(" Input shape: [1, 2, 16, 20]");
67 println!(" Output shape: {:?}", non_square_output.shape());
68
69 println!("✅ Adaptive pooling demonstration completed!\n");
70 Ok(())
71 }
More examples
examples/generative_models_complete.rs (line 184)
163 pub fn new(config: GenerativeConfig, rng: &mut SmallRng) -> StdResult<Self> {
164 let (_height, _width) = config.input_size;
165
166 // Feature extraction layers
167 let mut feature_extractor = Sequential::new();
168 feature_extractor.add(Conv2D::new(1, 32, (3, 3), (2, 2), PaddingMode::Same, rng)?);
169 feature_extractor.add(BatchNorm::new(32, 1e-5, 0.1, rng)?);
170
171 feature_extractor.add(Conv2D::new(32, 64, (3, 3), (2, 2), PaddingMode::Same, rng)?);
172 feature_extractor.add(BatchNorm::new(64, 1e-5, 0.1, rng)?);
173
174 feature_extractor.add(Conv2D::new(
175 64,
176 128,
177 (3, 3),
178 (2, 2),
179 PaddingMode::Same,
180 rng,
181 )?);
182 feature_extractor.add(BatchNorm::new(128, 1e-5, 0.1, rng)?);
183
184 feature_extractor.add(AdaptiveMaxPool2D::new((4, 4), None)?);
185
186 // Calculate flattened feature size
187 let feature_size = 128 * 4 * 4;
188
189 // Mean head
190 let mut mean_head = Sequential::new();
191 mean_head.add(Dense::new(
192 feature_size,
193 config.hidden_dims[0],
194 Some("relu"),
195 rng,
196 )?);
197 mean_head.add(Dropout::new(0.2, rng)?);
198 mean_head.add(Dense::new(
199 config.hidden_dims[0],
200 config.latent_dim,
201 None,
202 rng,
203 )?);
204
205 // Log variance head
206 let mut logvar_head = Sequential::new();
207 logvar_head.add(Dense::new(
208 feature_size,
209 config.hidden_dims[0],
210 Some("relu"),
211 rng,
212 )?);
213 logvar_head.add(Dropout::new(0.2, rng)?);
214 logvar_head.add(Dense::new(
215 config.hidden_dims[0],
216 config.latent_dim,
217 None,
218 rng,
219 )?);
220
221 Ok(Self {
222 feature_extractor,
223 mean_head,
224 logvar_head,
225 config,
226 })
227 }
examples/object_detection_complete.rs (line 209)
178 pub fn new(config: DetectionConfig, rng: &mut SmallRng) -> StdResult<Self> {
179 // Feature extraction backbone (simplified ResNet-like)
180 let mut feature_extractor = Sequential::new();
181
182 // Initial conv block
183 feature_extractor.add(Conv2D::new(3, 64, (7, 7), (2, 2), PaddingMode::Same, rng)?);
184 feature_extractor.add(BatchNorm::new(64, 0.1, 1e-5, rng)?);
185 feature_extractor.add(MaxPool2D::new((2, 2), (2, 2), None)?);
186
187 // Feature blocks
188 feature_extractor.add(Conv2D::new(
189 64,
190 128,
191 (3, 3),
192 (2, 2),
193 PaddingMode::Same,
194 rng,
195 )?);
196 feature_extractor.add(BatchNorm::new(128, 0.1, 1e-5, rng)?);
197
198 feature_extractor.add(Conv2D::new(
199 128,
200 256,
201 (3, 3),
202 (2, 2),
203 PaddingMode::Same,
204 rng,
205 )?);
206 feature_extractor.add(BatchNorm::new(256, 0.1, 1e-5, rng)?);
207
208 // Global pooling to fixed size
209 feature_extractor.add(AdaptiveMaxPool2D::new(config.feature_map_size, None)?);
210
211 // Classification head
212 let mut classifier_head = Sequential::new();
213 let feature_dim = 256 * config.feature_map_size.0 * config.feature_map_size.1;
214 classifier_head.add(Dense::new(feature_dim, 512, Some("relu"), rng)?);
215 classifier_head.add(Dropout::new(0.5, rng)?);
216 classifier_head.add(Dense::new(512, 256, Some("relu"), rng)?);
217 classifier_head.add(Dropout::new(0.3, rng)?);
218 classifier_head.add(Dense::new(
219 256,
220 config.num_classes * config.max_objects,
221 Some("softmax"),
222 rng,
223 )?);
224
225 // Bounding box regression head
226 let mut bbox_regressor = Sequential::new();
227 bbox_regressor.add(Dense::new(feature_dim, 512, Some("relu"), rng)?);
228 bbox_regressor.add(Dropout::new(0.5, rng)?);
229 bbox_regressor.add(Dense::new(512, 256, Some("relu"), rng)?);
230 bbox_regressor.add(Dropout::new(0.3, rng)?);
231 bbox_regressor.add(Dense::new(256, 4 * config.max_objects, None, rng)?); // 4 coordinates per object
232
233 Ok(Self {
234 feature_extractor,
235 classifier_head,
236 bbox_regressor,
237 config,
238 })
239 }
Trait Implementations§
impl<F: Float + Debug + ScalarOperand + Send + Sync + 'static> Layer<F> for AdaptiveMaxPool2D<F>
fn as_any_mut(&mut self) -> &mut dyn Any
Get the layer as a mutable dyn Any for downcasting.
fn forward(&self, input: &Array<F, IxDyn>) -> Result<Array<F, IxDyn>>
Forward pass of the layer.
fn backward(&self, _input: &Array<F, IxDyn>, grad_output: &Array<F, IxDyn>) -> Result<Array<F, IxDyn>>
Backward pass of the layer to compute gradients.
fn update(&mut self, _learning_rate: F) -> Result<()>
Update the layer parameters with the given gradients.
fn layer_type(&self) -> &str
Get the type of the layer (e.g., “Dense”, “Conv2D”).
fn parameter_count(&self) -> usize
Get the number of trainable parameters in this layer.
fn layer_description(&self) -> String
Get a detailed description of this layer.
fn gradients(&self) -> Vec<Array<F, IxDyn>>
Get the gradients of the layer parameters.
fn set_gradients(&mut self, _gradients: &[Array<F, IxDyn>]) -> Result<()>
Set the gradients of the layer parameters.
fn set_params(&mut self, _params: &[Array<F, IxDyn>]) -> Result<()>
Set the parameters of the layer.
fn set_training(&mut self, _training: bool)
Set the layer to training mode (true) or evaluation mode (false).
fn is_training(&self) -> bool
Get the current training mode.
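Putting the trait methods together, here is a minimal forward/backward sketch. It assumes, as is conventional for pooling layers, that backward returns an input-shaped gradient and that the layer has no trainable parameters; the concrete values are arbitrary.

use scirs2_neural::layers::{AdaptiveMaxPool2D, Layer};
use ndarray::Array4;

let pool = AdaptiveMaxPool2D::<f64>::new((2, 2), None)?;
let input = Array4::<f64>::from_elem((1, 1, 8, 8), 0.5).into_dyn();

// Forward: [1, 1, 8, 8] -> [1, 1, 2, 2]
let output = pool.forward(&input)?;
assert_eq!(output.shape(), &[1, 1, 2, 2]);

// Backward takes the gradient w.r.t. the output and returns the
// gradient w.r.t. the input (assumed here to have the input's shape).
let grad_output = Array4::<f64>::from_elem((1, 1, 2, 2), 0.04).into_dyn();
let grad_input = pool.backward(&input, &grad_output)?;
assert_eq!(grad_input.shape(), input.shape());

// Max pooling has no trainable parameters, so this is expected to be 0.
assert_eq!(pool.parameter_count(), 0);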
Auto Trait Implementations§
impl<F> Freeze for AdaptiveMaxPool2D<F>
impl<F> RefUnwindSafe for AdaptiveMaxPool2D<F> where F: RefUnwindSafe
impl<F> Send for AdaptiveMaxPool2D<F>
impl<F> Sync for AdaptiveMaxPool2D<F>
impl<F> Unpin for AdaptiveMaxPool2D<F> where F: Unpin
impl<F> UnwindSafe for AdaptiveMaxPool2D<F> where F: UnwindSafe
Blanket Implementations§
impl<T> BorrowMut<T> for T where T: ?Sized
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value.
impl<T> IntoEither for T
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.