tract_core/ops/cnn/conv/conv.rs

use tract_data::itertools::izip;
use tract_linalg::block_quant::{BlockQuantFact, PackedBlockQuantFormat};
use tract_linalg::WeightType;
use tract_num_traits::Zero;

use crate::internal::*;
use crate::model::*;
use crate::ops;
use crate::ops::array::Pad;
use crate::ops::array::PadMode;
use crate::ops::binary::TypedBinOp;
use crate::ops::cast::cast;
use crate::ops::cnn::conv::block_quant::{BlockQuantIntoShape, SplitGroupBlockQuant};
use crate::ops::cnn::conv::lazy_im2col::LazyIm2Col;
use crate::ops::cnn::conv::lazy_im2col::LazyIm2colParams;
use crate::ops::cnn::wire_reshape_bias_for_bin;
use crate::ops::cnn::PaddingSpec::*;
use crate::ops::einsum::EinSum;
use crate::ops::math::{add, div, mul, sub};
use crate::ops::math::{Add, Div, Mul, Sub};
use crate::ops::matmul::optimized::AddMatMulGeometry;
use crate::ops::matmul::optimized::MapOutputAxisToInput;
use crate::ops::matmul::pack::{OptMatMulPack, OptSimpleMatMulPack};
use crate::ops::matmul::quant::wire_ensure_q8_flavour;
use crate::ops::matmul::ModePicker;
use crate::ops::nn::Reduce;

use super::depth_wise::DepthWise;
use super::im2col::Im2Col;
use crate::ops::cnn::conv::{block_quant_aware_weight_shape, KernelFormat};
use crate::ops::cnn::pools::{ConcretePoolGeometry, PoolGeometry, PoolSpec};
use crate::ops::matmul::optimized::{OptMatMul, ProtoFusedSpec};
use crate::ops::nn::{BaseDataShape, DataFormat, DataShape};

use tract_linalg::mmm::{MMMInputFormat, MatMatMul};
use tract_linalg::pack::PackedFormat;
#[derive(Debug, Clone, new, Hash)]
pub struct Conv {
    pub pool_spec: PoolSpec,
    pub kernel_fmt: KernelFormat,
    pub group: usize,
    // None -> float convolution
    // Some(I32) -> quantized kernels with an i32 output; the last two Q
    // inputs (y0, y_scale) are ignored
    // Some(QXX) -> quantized output of type XX (I8, U8 or I32), whose own
    // quantization parameters are ignored in favor of the last two Q inputs
    pub q_params: Option<DatumType>,
}

impl Conv {
    pub fn input_channels(&self) -> usize {
        self.pool_spec.input_channels
    }

    pub fn output_channels(&self) -> usize {
        self.pool_spec.output_channels
    }

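    /// Wires the ops reshaping the kernel to the G,O,IHW form expected by the
    /// matmul lowering: group, output channels per group, then input channels
    /// and kernel spatial positions collapsed together. Block-quantized
    /// (opaque) kernels must be rank-0 OIHW payloads and take a dedicated
    /// split-and-reshape path; regular tensors go through the `KernelFormat`
    /// conversion ops.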
    pub fn wire_kernel_as_g_o_ihw(
        &self,
        model: &mut TypedModel,
        name: &str,
        mut kernel: OutletId,
    ) -> TractResult<TVec<OutletId>> {
        let fact = model.outlet_fact(kernel)?;
        if fact.datum_type.is_opaque() {
            ensure!(self.kernel_fmt == KernelFormat::OIHW && fact.rank() == 0);
            kernel = model.wire_node(
                format!("{name}.prep_kernel.g"),
                SplitGroupBlockQuant { group: self.group },
                &[kernel],
            )?[0];
            kernel = model.wire_node(
                format!("{name}.prep_kernel.ihw"),
                BlockQuantIntoShape {
                    shape: tvec!(
                        self.output_channels() / self.group,
                        self.input_channels() / self.group
                            * self.pool_spec.kernel_shape.iter().product::<usize>(),
                    ),
                },
                &[kernel],
            )?[0];
            Ok(tvec!(kernel))
        } else {
            for (ix, op) in self
                .kernel_fmt
                .kernel_as_group_o_ihw_ops(&fact.shape, self.group)
                .into_iter()
                .enumerate()
            {
                kernel = model.wire_node(format!("{name}.prep_kernel.{ix}"), op, &[kernel])?[0];
            }
            Ok(tvec!(kernel))
        }
    }

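    /// Packs the G,O,IHW kernel in the input format expected by the selected
    /// matmul kernel: a `PackedBlockQuantFormat` for block-quantized weights,
    /// a plain `PackedFormat` otherwise.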
    fn wire_pack_g_o_ihw(
        &self,
        model: &mut TypedModel,
        name: &str,
        format: &dyn MMMInputFormat,
        kernel: OutletId,
    ) -> TractResult<OutletId> {
        let fact = model.outlet_fact(kernel)?;
        let wire = if fact.datum_type.is_opaque() {
            let fact = model
                .outlet_fact(kernel)?
                .opaque_fact
                .as_ref()
                .and_then(|of| of.downcast_ref::<BlockQuantFact>())
                .context("Only BlockQuant opaque weights are supported")?;
            model.wire_node(
                format!("{name}.prep_kernel.pack"),
                OptSimpleMatMulPack {
                    packed_format: format
                        .downcast_ref::<PackedBlockQuantFormat>()
                        .context("Expected a block quant packing format")?
                        .clone(),
                    k: fact.k(),
                    m: fact.m(),
                },
                &[kernel],
            )?
        } else {
            let format = format
                .downcast_ref::<PackedFormat>()
                .context("Expected regular packing for numeric weights")?;
            model.wire_node(
                format!("{name}.prep_kernel.pack"),
                OptMatMulPack {
                    packers: vec![format.clone()],
                    k_axis: 2,
                    mn_axis: 1,
                    mode_picker: ModePicker::Single,
                },
                &[kernel],
            )?
        };
        Ok(wire[0])
    }

    // Returns the fused spec and the bias outlet: a scalar bias is fused
    // directly, a per-channel bias is first split into (group, channels per
    // group) form so it can be applied per row.
    fn wire_bias_as_non_linear(
        &self,
        model: &mut TypedModel,
        name: &str,
        bias: OutletId,
        c_group_axis: usize,
    ) -> TractResult<(ProtoFusedSpec, OutletId)> {
        use tract_linalg::BinOp::Add;
        let fact = model.outlet_fact(bias)?;
        if fact.shape.volume().is_one() {
            Ok((ProtoFusedSpec::BinScalar(2, Add), bias))
        } else {
            let bias = AxisOp::wire_split_axis(
                model,
                format!("{name}.reformat_bias"),
                bias,
                0,
                self.group,
            )?[0];
            let pfs =
                ProtoFusedSpec::BinPerRow(2, Add, MapOutputAxisToInput(tvec!((c_group_axis, 0))));
            Ok((pfs, bias))
        }
    }

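    /// Lowers a quantized convolution to an im2col + matmul pipeline.
    ///
    /// Inputs are `[x, kernel, bias, x0, x_scale, k0, k_scale, y0, y_scale]`.
    /// Both operands are normalized to an i8 flavour, the three scales are
    /// combined, the matmul accumulates in i32, per-channel kernel sums and
    /// per-position input sums are wired in to compensate the zero points,
    /// and the result is requantized to the output type before the final
    /// geometric reshape.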
    pub unsafe fn wire_as_quant_im2col(
        &self,
        model: &mut TypedModel,
        name: &str,
        wires: &[OutletId],
    ) -> TractResult<TVec<OutletId>> {
        ensure!(self.q_params.is_some());
        use crate::ops::matmul::quant as qmm;

        let c_dt = self.q_params.unwrap();
        let &[mut x, mut kernel, bias, mut x0, x_scale, mut k0, mut k_scale, y0, y_scale] = wires
        else {
            bail!("Wrong number of inputs")
        };
        wire_ensure_q8_flavour(model, name, &mut kernel, "k", &mut k0, i8::datum_type())?;
        wire_ensure_q8_flavour(model, name, &mut x, "x", &mut x0, i8::datum_type())?;

        let a_fact = model.outlet_fact(kernel)?.clone();
        let b_fact = model.outlet_fact(x)?.clone();

        let (_geo, m, k, n) = self.compute_geo(&b_fact)?;
        let (mmm, packing) = self.choose_impl(&b_fact, &a_fact, m, k, &n)?;
        let output_shape = self.pool_spec.output_shape(&b_fact.shape)?;

        if !model.outlet_fact(k_scale)?.shape.volume().is_one() {
            // requant is performed before geo_reshape, so we need at most one geo axis to the
            // right
            if !output_shape.fmt.c_is_last() {
                k_scale = model.wire_node(
                    format!("{name}.a_scale_axis_fix"),
                    AxisOp::Add(1),
                    &[k_scale],
                )?[0];
            }
        }

        let abc_scale = qmm::combine_scales(model, name, k_scale, x_scale, y_scale)?;

        let im2col = model.wire_node(
            format!("{name}.im2col"),
            Im2Col::new(
                self.pool_spec.clone(),
                self.group,
                k,
                &b_fact.shape,
                mmm.clone(),
                packing,
            )?,
            &[x, x0],
        )?[0];

        let g_o_ihw = self.wire_kernel_as_g_o_ihw(model, name, kernel)?;
        let g_o_ihw_as_i32 =
            model.wire_node(format!("{name}.kernel_as_i32"), cast(i32::datum_type()), &g_o_ihw)?;
        let sum_ker_g_c_k = model.wire_node(
            format!("{name}.sum_ker_g_c_k"),
            Reduce::new(tvec!(2), ops::nn::Reducer::Sum),
            &g_o_ihw_as_i32,
        )?;
        let sum_ker_a_g_c =
            model.wire_node(format!("{name}.rm_k"), AxisOp::Rm(2), &sum_ker_g_c_k)?;
        // align sum_A from G,C to "C" shape: N,HW,G,C (or N,G,C,HW)
        let sum_ker_n_g_c = model.wire_node(
            format!("{name}.sum_ker_n_g_c.axis_0"),
            AxisOp::Add(0),
            &sum_ker_a_g_c,
        )?;
        let hw_position = if self.pool_spec.data_format.c_is_last() { 1 } else { 3 };
        let sum_ker = model.wire_node(
            format!("{name}.sum_ker_n_g_c"),
            AxisOp::Add(hw_position),
            &sum_ker_n_g_c,
        )?;

        ensure!(mmm.packings()[packing].1.downcast_ref::<PackedFormat>().is_some());
        let mut sum_x = model.wire_node(
            format!("{name}.sum_x"),
            super::QSumB { dt: b_fact.datum_type, n, r: mmm.nr(), k },
            &[im2col],
        )?;
        // sum_b is N,G,HW. make it N,HW,G,C or N,G,C,HW
        sum_x = model.wire_node(format!("{name}.add_c"), AxisOp::Add(2), &sum_x)?;
        if self.pool_spec.data_format.c_is_last() {
            sum_x =
                model.wire_node(format!("{name}.transpose_sum_b"), AxisOp::Move(3, 1), &sum_x)?;
        }

        let (mmm_output_shape, c_axis, h_axis) = self.mmm_output_shape(&output_shape)?;
        let bias_name = &model.node(bias.node).name;
        let bias =
            model.wire_node(format!("{bias_name}.cast"), cast(mmm.internal_type()), &[bias])?[0];
        let wire = self.wire_mm_weights_bias(
            model,
            name,
            im2col,
            g_o_ihw[0],
            bias,
            mmm,
            packing,
            i32::datum_type(),
            mmm_output_shape.clone().into(),
            k,
            c_axis,
            h_axis,
        )?;

        let wire = qmm::compensate_zero_points(
            model,
            name,
            wire[0],
            k.to_dim(),
            k0,
            x0,
            sum_ker[0],
            sum_x[0],
        )?;

        let wire = self.wire_remove_group(model, name, &[wire], &mmm_output_shape, c_axis)?;
        let wire = self.wire_rm_n_if_needed(model, name, &wire)?;
        let wire = qmm::requant(model, name, wire[0], c_dt, abc_scale, y0)?;
        Self::wire_geo_reshape(model, name, &[wire], &output_shape)
    }

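    /// Collapses the extra group axis of the matmul output: a plain `Rm` when
    /// `group == 1`, otherwise a reshape merging (G, C/G) back into C.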
    pub fn wire_remove_group<D: DimLike>(
        &self,
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
        mmm_output_shape: &[D],
        c_axis: usize,
    ) -> TractResult<TVec<OutletId>> {
        let m = &mmm_output_shape[c_axis];
        let op = if self.group == 1 {
            AxisOp::Rm(c_axis - 1)
        } else {
            AxisOp::Reshape(
                c_axis - 1,
                tvec!(self.group.to_dim(), m.to_dim()),
                tvec!(m.to_dim() * self.group),
            )
        };
        model.wire_node(format!("{name}.reshape_group"), op, wire)
    }

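    /// Lowers a float convolution to the im2col + matmul pair: the input is
    /// expanded into packed patches by `Im2Col`, the kernel is reshaped and
    /// packed, and a single `OptMatMul` produces the output, with the bias
    /// fused in when it is non-trivial.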
    pub unsafe fn wire_as_im2col_pair(
        &self,
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
    ) -> TractResult<TVec<OutletId>> {
        let &[x, w, bias] = wire else { bail!("Wrong number of inputs") };
        let x_fact = model.outlet_fact(x)?.clone();
        let w_fact = model.outlet_fact(w)?.clone();
        let c_dt = crate::ops::matmul::output_type(x_fact.datum_type);

        let (_, m, k, n) = self.compute_geo(&x_fact)?;
        let (mmm, packing) = self.choose_impl(&x_fact, &w_fact, m, k, &n)?;
        let geo_output_shape = self.pool_spec.output_shape(&x_fact.shape)?;
        let (mmm_output_shape, c_axis, h_axis) = self.mmm_output_shape(&geo_output_shape)?;

        let padding =
            model.add_const(format!("{name}.b0"), Tensor::zero_scalar_dt(x_fact.datum_type)?)?;

        let mut wire: TVec<_> = wire.into();
        wire[0] = model.wire_node(
            format!("{name}.im2col"),
            Im2Col::new(
                self.pool_spec.clone(),
                self.group,
                k,
                &x_fact.shape,
                mmm.clone(),
                packing,
            )?,
            &[wire[0], padding],
        )?[0];

        let g_o_ihw = self.wire_kernel_as_g_o_ihw(model, name, wire[1])?;

        let wire = self
            .wire_mm_weights_bias(
                model,
                name,
                wire[0],
                g_o_ihw[0],
                bias,
                mmm,
                packing,
                c_dt,
                mmm_output_shape.clone().into(),
                k.to_usize().unwrap(),
                c_axis,
                h_axis,
            )
            .context("in wire_opt_matmul")?;

        let wire = self.wire_remove_group(model, name, &wire, &mmm_output_shape, c_axis)?;
        let wire = self.wire_rm_n_if_needed(model, name, &wire)?;
        Self::wire_geo_reshape(model, name, &wire, &geo_output_shape)
    }

    // The result always has N and G axes, with G right before C: c_axis
    // points to C, and c_axis - 1 points to G.
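    // For instance, an NHWC output N,HW,C becomes N,HW,G,C/G with c_axis
    // pointing at C/G and h_axis at HW; an NCHW output N,C,HW becomes
    // N,G,C/G,HW.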
    fn mmm_output_shape<D: DimLike>(
        &self,
        output_shape: &BaseDataShape<D, TVec<D>>,
    ) -> TractResult<(TVec<D>, usize, usize)> {
        let geo_collapsed_out: D = output_shape.hw_dims().iter().cloned().product();
        let shape: BaseDataShape<D, TVec<D>> = output_shape.fmt.with_n().from_n_c_hw(
            output_shape.n().cloned().unwrap_or_else(|| 1.into()),
            output_shape.c().clone(),
            tvec!(geo_collapsed_out),
        )?;
        let mut mmm_output_shape: TVec<D> = shape.shape.clone();
        let mut c_axis = shape.c_axis();
        let mut h_axis = shape.h_axis();
        mmm_output_shape[shape.c_axis()] = mmm_output_shape[c_axis].clone() / self.group;
        mmm_output_shape.insert(c_axis, self.group.into());
        if h_axis > c_axis {
            h_axis += 1;
        }
        c_axis += 1;
        Ok((mmm_output_shape, c_axis, h_axis))
    }

    fn wire_rm_n_if_needed(
        &self,
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
    ) -> TractResult<TVec<OutletId>> {
        if self.pool_spec.data_format.has_n() {
            Ok(wire.into())
        } else {
            model.wire_node(format!("{name}.rm_n"), AxisOp::Rm(0), wire)
        }
    }

    fn wire_geo_reshape<D: DimLike>(
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
        output_shape: &BaseDataShape<D, TVec<D>>,
    ) -> TractResult<TVec<OutletId>> {
        let geo_collapsed_out: D = output_shape.hw_dims().iter().cloned().product();
        model
            .wire_node(
                name,
                AxisOp::Reshape(
                    output_shape.h_axis(),
                    tvec!(geo_collapsed_out.to_dim()),
                    output_shape.hw_dims().iter().map(|d| d.to_dim()).collect(),
                ),
                wire,
            )
            .context("in wire_geo_reshape")
    }

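    /// Lowers a float convolution through `LazyIm2Col`, which gathers patches
    /// on the fly via precomputed byte offsets instead of materializing the
    /// im2col buffer. Any padding is applied upfront with an explicit `Pad`
    /// so the remaining geometry is valid-only. Requires a concrete input
    /// shape.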
    pub unsafe fn wire_as_lazy_im2col(
        &self,
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
    ) -> TractResult<TVec<OutletId>> {
        let &[mut x, kernel, bias] = wire else { bail!("Wrong number of inputs") };
        let mut x_fact = model.outlet_fact(x)?.clone();
        let w_fact = model.outlet_fact(kernel)?.clone();
        let (geo, m, k, n) = self.compute_geo(&x_fact)?;
        let (mmm, packing) = self.choose_impl(&x_fact, &w_fact, m, k, &n)?;
        debug!("{name} as lazy_im2col: m={m} k={k} n={n} {mmm:?}");
        let input_shape = x_fact.shape.as_concrete().unwrap().to_vec();
        let mut geo = geo.to_concrete(&input_shape)?.into_owned();
        let mut input_shape: DataShape = self.pool_spec.data_format.shape(input_shape.into())?;
        let padding = self.pool_spec.computed_padding(input_shape.hw_dims());
        if padding.iter().any(|axis| axis.pad_before != 0 || axis.pad_after != 0) {
            let mut pads = vec![(0, 0); x_fact.rank()];
            for (ix, ax) in padding.iter().enumerate() {
                pads[input_shape.h_axis() + ix] = (ax.pad_before, ax.pad_after);
            }
            let op = crate::ops::array::Pad {
                mode: crate::ops::array::PadMode::Constant(
                    Tensor::zero_scalar_dt(x_fact.datum_type)?.into_arc_tensor(),
                ),
                pads,
            };
            x = model.wire_node(format!("{name}.pad"), op, &[x])?[0];
            let valid_pool_spec = PoolSpec { padding: Valid, ..self.pool_spec.clone() };
            x_fact = model.outlet_fact(x)?.clone();
            let concrete_shape = x_fact.shape.as_concrete().unwrap();
            input_shape = valid_pool_spec.data_format.shape(concrete_shape.into())?;
            geo = valid_pool_spec
                .compute_geo(&x_fact.shape)?
                .to_concrete(concrete_shape)?
                .into_owned();
        }
        let c_dt = crate::ops::matmul::output_type(x_fact.datum_type);
        let c_stride = input_shape.c_stride();
        let size_of_b = x_fact.datum_type.size_of() as isize;
        let n_byte_offsets: Vec<isize> =
            geo.patch.centers_offsets().into_iter().map(|x| x * size_of_b).collect();
        let k_byte_offsets: Vec<isize> = (0..self.input_channels())
            .flat_map(|ici| {
                geo.patch
                    .standard_layout_data_field
                    .iter()
                    .map(move |x| (x + (ici * c_stride) as isize) * size_of_b)
            })
            .collect();
        let (mmm_output_shape, c_axis, h_axis) = self.mmm_output_shape(&geo.output_shape)?;
        let packer = mmm.packings()[packing]
            .1
            .downcast_ref::<PackedFormat>()
            .with_context(|| {
                format_err!(
                    "Lazy Im2Col expects a regular packed format, got {:?}",
                    mmm.packings()[packing].1
                )
            })?
            .clone();
        let params = LazyIm2colParams { packer, n_byte_offsets, k_byte_offsets };
        let x = model.wire_node(
            format!("{name}.lazyIm2col"),
            LazyIm2Col { params: Arc::new(params) },
            &[x],
        )?[0];

        let kernel = self.wire_kernel_as_g_o_ihw(model, name, kernel)?[0];
        let wire = self.wire_mm_weights_bias(
            model,
            name,
            x,
            kernel,
            bias,
            mmm,
            packing,
            c_dt,
            mmm_output_shape.clone().into(),
            k,
            c_axis,
            h_axis,
        )?;

        let wire = self.wire_remove_group(model, name, &wire, &mmm_output_shape, c_axis)?;
        let wire = self.wire_rm_n_if_needed(model, name, &wire)?;
        Self::wire_geo_reshape(model, name, &wire, &geo.output_shape)
    }

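    /// Computes the pooling geometry along with the matmul dimensions:
    /// m = output_channels / group, k = input_channels * kernel_surface /
    /// group, and n = product of the output spatial dimensions.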
    #[allow(clippy::type_complexity)]
    fn compute_geo(
        &self,
        input_fact: &TypedFact,
    ) -> TractResult<(PoolGeometry, usize, usize, TDim)> {
        let geo = self.pool_spec.compute_geo(&input_fact.shape)?;

        trace!("output channels: {:?}", self.output_channels());
        let m = self.output_channels() / self.group;
        let k = self.input_channels() * self.pool_spec.kernel_shape.iter().product::<usize>()
            / self.group;
        let n: TDim =
            self.pool_spec.output_shape(&input_fact.shape)?.hw_dims().iter().cloned().product();
        Ok((geo, m, k, n))
    }

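    /// Picks a `MatMatMul` implementation and the index of a packing for it.
    /// Block-quantized weights scan every kernel with the right accumulator
    /// type for a packing whose precursors match the weight format and input
    /// type, keeping the cheapest; plain weights let linalg propose a kernel
    /// for (m, k, n) and use its first compatible packing.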
    fn choose_impl(
        &self,
        input_fact: &TypedFact,
        weight_fact: &TypedFact,
        m: usize,
        k: usize,
        n: &TDim,
    ) -> TractResult<(Box<dyn MatMatMul>, usize)> {
        let w_dt = weight_fact.datum_type;
        let x_dt = input_fact.datum_type;

        let acc = if x_dt.is_float() { x_dt } else { i32::datum_type() };
        if w_dt.is_opaque() {
            let bqf = weight_fact
                .opaque_fact
                .as_ref()
                .and_then(|of| of.downcast_ref::<BlockQuantFact>())
                .unwrap();
            let weight_type = WeightType::BlockQuant(bqf.format.clone());
            tract_linalg::ops()
                .mmm_impls()
                .iter()
                .filter(|mmm| mmm.internal_type() == acc)
                .flat_map(|mmm| {
                    mmm.packings().iter().enumerate().map(move |(ix, p)| (mmm, ix, &p.0, &p.1))
                })
                .filter(|(_, _, pa, pb)| {
                    pb.precursor() == x_dt.into() && pa.precursor() == weight_type
                })
                .map(|(mmm, p, _, _)| (mmm.clone(), p))
                .min_by_key(|(mmm, _)| {
                    mmm.quality().cost() as isize * 1000 - (mmm.mr() * mmm.nr()) as isize
                })
                .context("No matmul found")
        } else {
            let mmm = tract_linalg::ops()
                .mmm(acc, Some(m), Some(k), n.to_usize().ok())
                .context("No matmul found")?;
            let packing = mmm
                .packings()
                .iter()
                .position(|p| {
                    p.0.precursor() == w_dt.unquantized().into()
                        && p.1.precursor() == x_dt.unquantized().into()
                })
                .context("No packing found")?;
            Ok((mmm, packing))
        }
    }

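    /// Wires the final `OptMatMul`: packs the kernel as operand A, maps the
    /// batch and group axes of the output back onto the operands, fuses the
    /// bias as a scalar or per-row addition when it is not all-zero, and
    /// stores through a C view indexed by the m and n axes.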
    #[allow(clippy::too_many_arguments)]
    fn wire_mm_weights_bias(
        &self,
        model: &mut TypedModel,
        name: &str,
        input: OutletId,
        g_o_ihw: OutletId,
        bias: OutletId,
        mmm: Box<dyn MatMatMul>,
        packing: usize,
        c_datum_type: DatumType,
        mmm_output_shape: ShapeFact,
        k: usize,
        c_m_axis: usize,
        c_n_axis: usize,
    ) -> TractResult<TVec<OutletId>> {
        ensure!(model.outlet_fact(bias)?.datum_type == mmm.internal_type());
        let a_pack = &mmm.packings()[packing].0;
        let packed_ker = self
            .wire_pack_g_o_ihw(model, name, &**a_pack, g_o_ihw)
            .context("in wire_pack_g_o_ihw")?;
        let (mut c_to_a_axis_mapping, mut c_to_b_axis_mapping) = (tvec!(), tvec!());

        c_to_a_axis_mapping.push((c_m_axis - 1, 0)); // Group
        c_to_b_axis_mapping.push((0, 0)); // Batch
        c_to_b_axis_mapping.push((c_m_axis - 1, 1)); // Group

        let geo = AddMatMulGeometry {
            k: k.to_dim(),
            c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping),
            c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping),
        };
        let mut ops: Vec<ProtoFusedSpec> =
            vec![ProtoFusedSpec::AddMatMul { geo, a: 1, b: 0, packings: vec![(packing, None)] }];
        let mut wires: TVec<OutletId> = tvec!(input, packed_ker);
        let bias_fact = model.outlet_fact(bias)?;
        if bias_fact.konst.is_none() || !bias_fact.konst.as_ref().unwrap().is_all_zero()? {
            let (fused, bias) = self.wire_bias_as_non_linear(model, name, bias, c_m_axis - 1)?;
            wires.push(bias);
            ops.push(fused);
        }
        ops.push(ProtoFusedSpec::Store(vec![unsafe { mmm.c_view(c_m_axis, c_n_axis) }]));
        model.wire_node(
            format!("{name}.matmatmul"),
            OptMatMul::new(
                vec![mmm],
                ModePicker::Single,
                c_datum_type.fact(mmm_output_shape),
                c_m_axis,
                c_n_axis,
                ops,
                packing == 0 && self.group == 1,
            )?,
            &wires,
        )
    }

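    /// Lowers a depthwise convolution (group == input channels == output
    /// channels) to the dedicated `DepthWise` op, reshaping the bias so it
    /// broadcasts along the channel axis. Requires a concrete input shape.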
    pub fn wire_as_depth_wise(
        &self,
        model: &mut TypedModel,
        name: &str,
        wire: &[OutletId],
    ) -> TractResult<OutletId> {
        let &[x, kernel, mut bias] = wire else { bail!("Wrong number of inputs") };
        let x_fact = model.outlet_fact(x)?.clone();
        let x_shape = x_fact.shape.as_concrete().unwrap();
        let ConcretePoolGeometry { input_shape, patch, output_shape } =
            self.pool_spec.compute_geo(&x_fact.shape)?.to_concrete(x_shape)?.into_owned();
        let kernel = self.wire_kernel_as_g_o_ihw(model, name, kernel)?;
        let c_axis = self.pool_spec.data_format.shape(x_shape)?.c_axis();
        bias = wire_reshape_bias_for_bin(
            model,
            name,
            bias,
            x_fact.rank(),
            c_axis,
            self.output_channels(),
        )?[0];
        let op = DepthWise::new(patch, input_shape, output_shape);
        Ok(model.wire_node(name, op, &[x, kernel[0], bias])?[0])
    }

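    /// When a spatial axis has a stride the kernel does not actually need
    /// (kernel size 1 on that axis, or a dilation divisible by the stride),
    /// factor the stride out into a `Downsample` op wired before a
    /// stride-adjusted copy of the convolution.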
    fn declutter_stride_slice_to_downsample(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        let spatial_rank = self.pool_spec.rank();
        if let Some(axis) = (0..spatial_rank).find(|&ax| {
            self.pool_spec.stride(ax) > 1
                && self.pool_spec.padding.valid_dim(ax, self.pool_spec.stride(ax) == 1)
                && (self.pool_spec.kernel_shape[ax] == 1
                    || self.pool_spec.dilation(ax) % self.pool_spec.stride(ax) == 0)
        }) {
            let input_fact = model.outlet_fact(node.inputs[0])?;
            let downsample_factor = self.pool_spec.stride(axis);
            let mut new_op = self.clone();
            if new_op.pool_spec.dilation(axis) > 1 {
                new_op.pool_spec.dilations.as_mut().unwrap()[axis] /= downsample_factor;
            }
            new_op.pool_spec.strides.as_mut().unwrap()[axis] /= downsample_factor;
            let mut patch = TypedModelPatch::default();
            let mut taps = patch.taps(model, &node.inputs)?;
            let shape = self.pool_spec.data_format.shape(&input_fact.shape)?;
            taps[0] = patch.wire_node(
                format!("{}.downsample.{}", node.name, axis),
                crate::ops::Downsample::new(axis + shape.h_axis(), downsample_factor as isize, 0),
                &[taps[0]],
            )?[0];
            let id = patch.wire_node(&*node.name, new_op, &taps)?[0];
            patch.shunt_outside(model, OutletId::new(node.id, 0), id)?;
            return Ok(Some(patch));
        }
        Ok(None)
    }

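    /// A convolution with a 1x1 kernel, unit strides and dilations, no
    /// padding and a single group is just a matrix multiplication: rewrite it
    /// as an `EinSum`, keeping `q_params` on the quantized path and wiring
    /// the bias as a plain addition otherwise.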
    fn declutter_as_einsum(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        let (input_facts, output_facts) = model.node_facts(node.id)?;
        let full_input_shape = input_facts[0].shape.to_tvec();
        let input_shape = self.pool_spec.data_format.shape(&full_input_shape)?;
        if self.group == 1
            && self.pool_spec.strides().iter().all(|s| *s == 1)
            && self.pool_spec.dilations().iter().all(|d| *d == 1)
            && self.pool_spec.kernel_shape.iter().product::<usize>() == 1
            && self
                .pool_spec
                .computed_padding(input_shape.hw_dims())
                .iter()
                .all(|pad| pad.pad_after.is_zero() && pad.pad_before.is_zero())
        {
            let mut axes = self.axes_mapping(&input_facts, &output_facts)?;
            let mut patch = TypedModelPatch::new("declutter_as_einsum");
            let mut taps = patch.taps(model, &node.inputs)?;
            let name = &node.name;
            let co = self.output_channels();
            taps[1] =
                self.wire_kernel_as_g_o_ihw(&mut patch, &format!("{name}.filters"), taps[1])?[0];
            taps[1] =
                patch.wire_node(format!("{name}.filters_as_co_ci"), AxisOp::Rm(0), &[taps[1]])?[0];

            while axes.rank(InOut::In(1)) > 0 {
                axes = axes.remove_axis_occurency(InOut::In(1), 0)?;
            }
            axes = axes
                .with_extra_axis_occurency('O', InOut::In(1), 0)?
                .with_extra_axis_occurency('I', InOut::In(1), 1)?;

            let bias_fact = input_facts[2];
            let wire = if self.q_params.is_some() {
                if bias_fact.rank() == 1 {
                    axes = axes.linking('O', (InOut::In(2), 0))?;
                }
                let op = EinSum { axes, operating_dt: i32::datum_type(), q_params: self.q_params };
                patch.wire_node(format!("{name}.einsum"), op, &taps)?[0]
            } else {
                axes = axes.remove_slot(InOut::In(2))?;
                let op = EinSum { axes, operating_dt: input_facts[0].datum_type, q_params: None };
                let mut wire = patch.wire_node(format!("{name}.einsum"), op, &taps[0..2])?[0];

                if !bias_fact.konst.as_ref().map(|f| f.is_zero()).transpose()?.unwrap_or(false) {
                    let bias_current_shape =
                        if bias_fact.rank() == 0 { tvec!() } else { tvec!(co.to_dim()) };
                    let mut bias_shape = tvec!(1.to_dim(); input_shape.rank());
                    if bias_fact.rank() > 0 {
                        bias_shape[input_shape.c_axis()] = co.to_dim();
                    }
                    let b = patch.wire_node(
                        format!("{name}.bias.reshape"),
                        AxisOp::Reshape(0, bias_current_shape, bias_shape),
                        &[taps[2]],
                    )?[0];
                    wire = patch.wire_node(
                        format!("{name}.bias"),
                        crate::ops::math::add(),
                        &[wire, b],
                    )?[0];
                }
                wire
            };
            patch.node_mut(wire.node).name = node.name.to_string();
            patch.shunt_outside(model, node.id.into(), wire)?;
            return Ok(Some(patch));
        }
        Ok(None)
    }

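    /// Absorbs a preceding constant zero `Pad` on the spatial axes into the
    /// convolution's explicit padding, provided the current padding is not
    /// ONNX-style or Same* and the Pad leaves the batch and channel axes
    /// untouched.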
    fn declutter_precursor_padding(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        if matches!(self.pool_spec.padding, ExplicitOnnxPool(_, _, _) | SameLower | SameUpper) {
            return Ok(None);
        }
        let prec = model.node(node.inputs[0].node);
        let pad = if let Some(pad) = prec.op_as::<Pad>() { pad } else { return Ok(None) };
        let value = if let PadMode::Constant(c) = &pad.mode {
            c
        } else {
            return Ok(None);
        };
        let shape = self.pool_spec.data_format.shape(&model.outlet_fact(node.inputs[0])?.shape)?;
        if !value.is_zero()?
            || (self.pool_spec.data_format.has_n() && pad.pads[0] != (0, 0))
            || pad.pads[shape.c_axis()] != (0, 0)
        {
            return Ok(None);
        }
        let mut before: TVec<usize> = pad.pads[shape.hw_axes()].iter().map(|pair| pair.0).collect();
        let mut after: TVec<usize> = pad.pads[shape.hw_axes()].iter().map(|pair| pair.1).collect();
        if let Explicit(bef, aft) = &self.pool_spec.padding {
            izip!(&mut before, bef).for_each(|(pad, cv)| *pad += cv);
            izip!(&mut after, aft).for_each(|(pad, cv)| *pad += cv);
        }
        let padding = Explicit(before, after);
        let mut new = self.clone();
        new.pool_spec.padding = padding;
        let mut patch = TypedModelPatch::default();
        let mut wire = patch.taps(model, &node.inputs)?;
        wire[0] = patch.tap_model(model, prec.inputs[0])?;
        let wire = patch.wire_node(&node.name, new, &wire)?;
        patch.shunt_outside(model, node.id.into(), wire[0])?;
        Ok(Some(patch))
    }

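    /// Folds a following per-channel Add/Sub/Mul/Div into the convolution by
    /// rewriting its kernel and/or bias, when the other operand broadcasts
    /// over exactly the channel axis (float convolutions with group == 1
    /// only).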
    fn declutter_channel_arithmetic_succ(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        if self.q_params.is_some() || self.group != 1 {
            return Ok(None);
        }
        let &[succ_outlet] = &*node.outputs[0].successors else { return Ok(None) };
        let succ = model.node(succ_outlet.node);
        let Some(bin) = succ.op_as::<TypedBinOp>() else { return Ok(None) };
        let other_input = succ.inputs[1 - succ_outlet.slot];
        let axes_mapping = model.node_axes_mapping(succ.id)?;
        let input_shape =
            self.pool_spec.data_format.shape(&model.outlet_fact(node.inputs[0])?.shape)?;
        let conv_c_axis = input_shape.c_axis();
        if axes_mapping.axis((InOut::In(succ_outlet.slot), conv_c_axis))?.inputs
            [1 - succ_outlet.slot]
            .len()
            != 1
        {
            return Ok(None);
        };
        let mut other_expected_shape = tvec!(1.to_dim(); input_shape.rank());
        other_expected_shape[conv_c_axis] = self.output_channels().to_dim();
        if *other_expected_shape != *model.outlet_fact(other_input)?.shape {
            return Ok(None);
        }

        let mut patch = TypedModelPatch::default();
        let [input, mut kernel, mut bias] = &*patch.taps(model, &node.inputs)? else {
            panic!("Expect three inputs");
        };
        let name = &node.name;
        let succ_name = &succ.name;

        let operand = patch.tap_model(model, other_input)?;

        let renamed_bias = format!("{name}.{succ_name}.bias");
        let renamed_kernel = format!("{name}.{succ_name}.kernel");
        bias = wire_reshape_bias_for_bin(
            &mut patch,
            format!("{renamed_bias}.reshape"),
            bias,
            1,
            0,
            self.output_channels(),
        )?[0];

        let operand = wire_reshape_bias_for_bin(
            &mut patch,
            format!("{renamed_bias}.reshape_operand"),
            operand,
            1,
            0,
            self.output_channels(),
        )?[0];

        let operand_fact = patch.outlet_fact(operand)?.shape.to_tvec();
        let kernel_fact = patch.outlet_fact(kernel)?;
        let mut operand_shape_for_kernel = tvec!(1.to_dim(); 2 + input_shape.hw_rank());
        operand_shape_for_kernel[self.kernel_fmt.o_axis(&kernel_fact.shape)] =
            self.output_channels().to_dim();
        let operand_for_kernel = patch.wire_node(
            format!("{renamed_kernel}.reshape_operand"),
            AxisOp::Reshape(0, operand_fact, operand_shape_for_kernel),
            &[operand],
        )?[0];

        if bin.0.is::<Sub>() && succ_outlet.slot == 0 {
            bias = patch.wire_node(&renamed_bias, sub(), &[bias, operand])?[0];
        } else if bin.0.is::<Sub>() {
            bias = patch.wire_node(&renamed_bias, sub(), &[operand, bias])?[0];
        } else if bin.0.is::<Div>() && succ_outlet.slot == 0 {
            bias = patch.wire_node(&renamed_bias, div(), &[bias, operand])?[0];
            kernel = patch.wire_node(&renamed_kernel, div(), &[kernel, operand_for_kernel])?[0];
        } else if bin.0.is::<Div>() {
            bias = patch.wire_node(&renamed_bias, div(), &[operand, bias])?[0];
            kernel = patch.wire_node(&renamed_kernel, div(), &[operand_for_kernel, kernel])?[0];
        } else if bin.0.is::<Add>() {
            bias = patch.wire_node(&renamed_bias, add(), &[bias, operand])?[0];
        } else if bin.0.is::<Mul>() {
            bias = patch.wire_node(&renamed_bias, mul(), &[bias, operand])?[0];
            kernel = patch.wire_node(&renamed_kernel, mul(), &[kernel, operand_for_kernel])?[0];
        } else {
            return Ok(None);
        };
        let wire = patch.wire_node(&node.name, self.clone(), &[*input, kernel, bias])?[0];
        patch.shunt_outside(model, succ_outlet.node.into(), wire)?;
        Ok(Some(patch))
    }
}

impl Op for Conv {
    fn name(&self) -> Cow<str> {
        "Conv".into()
    }

    fn info(&self) -> TractResult<Vec<String>> {
        let mut info = self.pool_spec.info();
        info.push(format!("Kernel {:?} (groups:{})", self.kernel_fmt, self.group));
        Ok(info)
    }

    fn validation(&self) -> Validation {
        Validation::Rounding
    }

    op_as_typed_op!();
}

impl EvalOp for Conv {
    fn is_stateless(&self) -> bool {
        true
    }

    fn eval(&self, inputs: TVec<TValue>) -> TractResult<TVec<TValue>> {
        let mut model = TypedModel::default();
        let wire: TVec<OutletId> = inputs
            .iter()
            .enumerate()
            .map(|(ix, v)| model.add_source(format!("source.{ix}"), v.datum_type().fact(v.shape())))
            .collect::<TractResult<_>>()?;
        let wire = unsafe {
            if self.q_params.is_some() {
                self.wire_as_quant_im2col(&mut model, "im2col-adhoc", &wire)?
            } else {
                self.wire_as_im2col_pair(&mut model, "im2col-adhoc", &wire)?
            }
        };
        model.set_output_outlets(&wire)?;
        model.into_runnable()?.run(inputs)
    }
}

impl TypedOp for Conv {
    fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
        ensure!(self.q_params.is_some() || inputs[0].datum_type.is_float());
        let q_inputs = if self.q_params.is_some() { 6 } else { 0 };
        ensure!(inputs[1].datum_type.is_number() || self.kernel_fmt == KernelFormat::OIHW);
        if inputs.len() != 3 + q_inputs {
            bail!("Wrong number of inputs: expected {} got {}", 3 + q_inputs, inputs.len());
        }
        if self.q_params.is_some() {
            ensure!(inputs[2].datum_type == i32::datum_type());
            ensure!(inputs[3].datum_type == i32::datum_type());
            ensure!(inputs[4].datum_type.is_float());
            ensure!(inputs[5].datum_type == i32::datum_type());
            ensure!(inputs[6].datum_type.is_float());
            ensure!(inputs[7].datum_type == i32::datum_type());
            ensure!(inputs[8].datum_type.is_float());
        }
        let weight_shape = block_quant_aware_weight_shape(inputs[1])?;
        ensure!(self.pool_spec.rank() + 2 == weight_shape.len());
        if self.pool_spec.data_format.shape(&*inputs[0].shape)?.c()
            != &self.input_channels().to_dim()
        {
            bail!(
                "Inconsistent convolution: input is {:?}, but kernel expects {} input channels.\n{:?}",
                inputs[0],
                self.input_channels(),
                self
            );
        }
        if let ExplicitOnnxPool(bef, after, _) | Explicit(bef, after) = &self.pool_spec.padding {
            anyhow::ensure!(bef.len() == self.pool_spec.rank());
            anyhow::ensure!(after.len() == self.pool_spec.rank());
        }
        ensure!(
            inputs[2].rank() == 0
                || (inputs[2].rank() == 1
                    && inputs[2].shape.volume() == self.output_channels().to_dim()),
            "Bias should be scalar or a vector with one value per output channel. Output channels is {}, bias is {:?}",
            self.output_channels(),
            inputs[2]
        );
        let mut fact = self.pool_spec.output_facts(inputs)?.remove(0);
        if let Some(dt) = self.q_params {
            fact.datum_type = dt;
        } else {
            ensure!(
                inputs[1].datum_type.is_opaque() || inputs[0].datum_type == inputs[1].datum_type,
                "Convolution input, weights and bias must have the same type, got {inputs:?}",
            )
        }
        Ok(tvec!(fact))
    }

    fn axes_mapping(
        &self,
        inputs: &[&TypedFact],
        outputs: &[&TypedFact],
    ) -> TractResult<AxesMapping> {
        let fact = &inputs[0];
        let shape = self.pool_spec.data_format.shape(&fact.shape)?;
        let mut axes = AxesMapping::disconnected(inputs, outputs)?
            .renaming((InOut::In(0), shape.c_axis()), 'I')?
            .renaming((InOut::Out(0), shape.c_axis()), 'O')?;
        if let Some(n_axis) = shape.n_axis() {
            axes = axes
                .renaming((InOut::In(0), n_axis), 'N')?
                .linking('N', (InOut::Out(0), n_axis))?;
        }
        let h_axis = shape.h_axis();
        let geo = "HWXYZ".chars().chain('a'..);
        let kernel_spatial_shape = &self.pool_spec.kernel_shape;
        let padding = self.pool_spec.computed_padding(shape.hw_dims());
        for ((ix, &dim), repr) in kernel_spatial_shape.iter().enumerate().zip(geo) {
            if dim == 1
                && self.pool_spec.dilation(ix) == 1
                && self.pool_spec.stride(ix) == 1
                && padding[ix].pad_before.is_zero()
                && padding[ix].pad_after.is_zero()
            {
                axes = axes
                    .renaming((InOut::In(0), ix + h_axis), repr)?
                    .linking(repr, (InOut::Out(0), ix + h_axis))?;
            }
        }
        if self.q_params.is_some() {
            for (qp_ix, qp) in inputs.iter().enumerate().skip(3) {
                if qp.rank() == 1 {
                    axes = match qp_ix {
                        3 | 4 => axes.linking('I', (InOut::In(qp_ix), 0))?,
                        5 | 6 => axes.linking('O', (InOut::In(qp_ix), 0))?,
                        7 | 8 => axes.linking('O', (InOut::In(qp_ix), 0))?,
                        _ => unreachable!(),
                    };
                }
            }
        }
        Ok(axes)
    }

    fn declutter(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        macro_rules! pass {
            ($func:ident) => {
                if let Some(mut r) = self.$func(model, node).context(stringify!($func))? {
                    trace!(stringify!($func));
                    r.push_context(stringify!($func));
                    return Ok(Some(r));
                }
            };
        }
        pass!(declutter_stride_slice_to_downsample);
        pass!(declutter_as_einsum);
        pass!(declutter_channel_arithmetic_succ);
        pass!(declutter_precursor_padding);
        Ok(None)
    }

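    // FMA cost below: N * C_in * C_out * output_points * kernel_surface / group,
    // with N defaulting to 1 when the data format has no batch axis.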
    fn cost(&self, inputs: &[&TypedFact]) -> TractResult<TVec<(Cost, TDim)>> {
        let shape = self.pool_spec.data_format.shape(inputs[0].shape.to_tvec())?;
        let kernel_spatial_shape = &self.pool_spec.kernel_shape;
        let output_dims = self.pool_spec.padding.compute(
            shape.hw_dims(),
            kernel_spatial_shape,
            &self
                .pool_spec
                .dilations
                .clone()
                .unwrap_or_else(|| tvec!(1; kernel_spatial_shape.len())),
            &self.pool_spec.strides.clone().unwrap_or_else(|| tvec!(1; kernel_spatial_shape.len())),
        );
        let n_output_points: TDim =
            output_dims.iter().map(|d| d.convoluted.clone()).product::<TDim>();
        let n_output_channels = self.output_channels().to_dim();
        let kernel_surface = kernel_spatial_shape.iter().product::<usize>().to_dim();
        let one = 1.to_dim();
        Ok(tvec!((
            Cost::FMA(inputs[0].datum_type),
            shape.n().cloned().unwrap_or(one)
                * shape.c()
                * n_output_channels
                * n_output_points
                * kernel_surface
                / self.group
        )))
    }

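    // Axis changes handled below: dropping the batch axis, toggling the data
    // format between channel-first and channel-last, and Rm/Add/Move over the
    // spatial axes (mirrored on the kernel). Everything else is refused.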
    fn change_axes(
        &self,
        model: &TypedModel,
        node: &TypedNode,
        io: InOut,
        change: &AxisOp,
    ) -> TractResult<Option<AxisChangeConsequence>> {
        if io == InOut::In(1) {
            return Ok(None);
        }
        if io == InOut::In(2) {
            if let &AxisOp::Rm(_) = change {
                return Ok(Some(AxisChangeConsequence {
                    substitute_op: Some(Box::new(self.clone())),
                    wire_changes: tvec!(),
                }));
            }
        }
        let full_input_shape = model.outlet_fact(node.inputs[0])?.shape.to_tvec();
        let shape = self.pool_spec.data_format.shape(full_input_shape.clone())?;
        // remove n
        if let Some(n) = shape.n_axis() {
            assert_eq!(n, 0);
            if change == &AxisOp::Rm(n) {
                let op = Conv { pool_spec: self.pool_spec.dispose_n_axis(), ..self.clone() };
                return Ok(Some(AxisChangeConsequence {
                    substitute_op: Some(Box::new(op)),
                    wire_changes: tvec!(
                        (InOut::In(0), change.clone()),
                        (InOut::Out(0), change.clone())
                    ),
                }));
            }
            if change.transform_axis(n).map(|axis| axis > 0).unwrap_or(true) {
                return Ok(None);
            }
        }
        // format swap: chw <-> hwc
        let (new_format, axis_move) = match self.pool_spec.data_format {
            DataFormat::NCHW => {
                (DataFormat::NHWC, AxisOp::Move(shape.c_axis(), full_input_shape.len() - 1))
            }
            DataFormat::CHW => {
                (DataFormat::HWC, AxisOp::Move(shape.c_axis(), full_input_shape.len() - 1))
            }
            DataFormat::NHWC => (DataFormat::NCHW, AxisOp::Move(shape.c_axis(), 1)),
            DataFormat::HWC => (DataFormat::CHW, AxisOp::Move(shape.c_axis(), 0)),
        };
        if *change == axis_move {
            let mut new_op = self.clone();
            new_op.pool_spec.data_format = new_format;
            return Ok(Some(AxisChangeConsequence {
                substitute_op: Some(Box::new(new_op)),
                wire_changes: tvec!(
                    (InOut::In(0), change.clone()),
                    (InOut::Out(0), change.clone())
                ),
            }));
        }
        // geo axis manips
        if model.node_input_facts(node.id)?[1].datum_type.is_opaque() {
            return Ok(None);
        }
        use AxisOp::*;
        let h_axis = shape.h_axis();
        let hw_axes = shape.hw_axes();
        let kh_axis = self.kernel_fmt.h_axis();
        let (geo_adjusted, kernel_adjusted) = match change {
            Rm(a)
                if hw_axes.contains(a)
                    && hw_axes.len() > 1
                    && self.pool_spec.dilation(a - h_axis) == 1
                    && self.pool_spec.stride(a - h_axis) == 1
                    && self.pool_spec.kernel_shape[a - h_axis] == 1 =>
            {
                let geo_axis = a - h_axis;
                (Rm(geo_axis), Rm(kh_axis + geo_axis))
            }
            Add(a) if hw_axes.contains(a) => (Add(a - h_axis), Add(a - h_axis + kh_axis)),
            Move(f, t) if hw_axes.contains(f) && hw_axes.contains(t) => {
                (Move(f - h_axis, t - h_axis), Move(f - h_axis + kh_axis, t - h_axis + kh_axis))
            }
            _ => return Ok(None),
        };
        let pool_spec = self.pool_spec.change_geo_axes(&geo_adjusted)?;
        let new_op = Conv { pool_spec, ..self.clone() };
        Ok(Some(AxisChangeConsequence {
            substitute_op: Some(Box::new(new_op)),
            wire_changes: tvec!(
                (InOut::In(0), change.clone()),
                (InOut::In(1), kernel_adjusted),
                (InOut::Out(0), change.clone())
            ),
        }))
    }

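    // Codegen dispatch: quantized convolutions take the quant im2col path;
    // single-batch, single-group convolutions with a large enough kernel use
    // the lazy im2col; depthwise cases (group == C_in == C_out) get the
    // dedicated op; everything else falls back to the im2col + matmul pair.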
    fn codegen(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        let input_fact = model.outlet_fact(node.inputs[0])?;
        unsafe {
            if self.q_params.is_some() {
                let mut patch = TypedModelPatch::default();
                let inputs = patch.taps(model, &node.inputs)?;
                let wire = self
                    .wire_as_quant_im2col(&mut patch, &node.name, &inputs)
                    .context("in wire_as_quant_im2col")?;
                patch.shunt_outside(model, node.id.into(), wire[0])?;
                patch.obliterate(node.id)?;
                Ok(Some(patch.with_context("quantized-codegen")))
            } else if input_fact
                .shape
                .as_concrete()
                .map(|s| {
                    should_use_lazy(
                        &self.pool_spec.data_format.shape(s.into()).unwrap(),
                        &self.pool_spec,
                        self.group,
                    )
                })
                .unwrap_or(false)
            {
                let mut patch = TypedModelPatch::new("wire_as_lazy_im2col");
                let inputs = patch.taps(model, &node.inputs)?;
                let wire = self
                    .wire_as_lazy_im2col(&mut patch, &node.name, &inputs)
                    .context("wire_as_lazy_im2col")?[0];
                patch.shunt_outside(model, OutletId::new(node.id, 0), wire)?;
                patch.obliterate(node.id)?;
                Ok(Some(patch))
            } else if self.group != 1
                && self.group == self.output_channels()
                && self.group == self.input_channels()
                && input_fact.shape.as_concrete().is_some()
            {
                let mut patch = TypedModelPatch::default();
                let inputs = patch.taps(model, &node.inputs)?;
                let wire = self
                    .wire_as_depth_wise(&mut patch, &node.name, &inputs)
                    .context("wire_as_depth_wise")?;
                patch.shunt_outside(model, OutletId::new(node.id, 0), wire)?;
                patch.obliterate(node.id)?;
                Ok(Some(patch))
            } else {
                let mut patch = TypedModelPatch::default();
                let inputs = patch.taps(model, &node.inputs)?;
                let wire = self
                    .wire_as_im2col_pair(&mut patch, &node.name, &inputs)
                    .context("in wire_as_im2col_pair")?[0];
                patch.shunt_outside(model, OutletId::new(node.id, 0), wire)?;
                patch.obliterate(node.id)?;
                Ok(Some(patch))
            }
        }
    }

    as_op!();
}

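// Heuristic for the lazy im2col path: batch of 1, a single group, and a
// kernel surface above 5, where materializing the full im2col buffer is
// likely not worth it.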
fn should_use_lazy(input_shape: &DataShape, pool_spec: &PoolSpec, group: usize) -> bool {
    input_shape.n().unwrap_or(&1) == &1
        && group == 1
        && pool_spec.kernel_shape.iter().product::<usize>() > 5
}

#[allow(non_snake_case)]
#[cfg(test)]
mod test {
    use super::*;
    use crate::ops::array::Pad;
    use DataFormat::*;

    #[test]
    fn onnx_basic_convinteger() {
        let op = Conv {
            pool_spec: PoolSpec {
                data_format: NCHW,
                kernel_shape: tvec!(2, 2),
                padding: Valid,
                dilations: None,
                strides: None,
                input_channels: 1,
                output_channels: 1,
            },
            kernel_fmt: KernelFormat::OIHW,
            group: 1,
            q_params: Some(i32::datum_type()),
        };
        let input = tvec!(
            rctensor4(&[[[[1u8, 2, 3], [4, 5, 6], [7, 8, 9]]]]),
            rctensor4(&[[[[1u8, 1], [1, 1]]]]),
            rctensor0(0u32),
            rctensor0(1u8),
            rctensor0(1.0f32),
            rctensor0(0u8),
            rctensor0(1.0f32),
            rctensor0(0i32),
            rctensor0(1.0f32),
        );
        let input = input.into_iter().map(IntoTValue::into_tvalue).collect::<TVec<_>>();
        let output = op.eval(input).unwrap();
        assert_eq!(*output[0], tensor4(&[[[[8i32, 12], [20, 24]]]]));
    }

    #[test]
    fn valid_conv_absorbs_precursor_pad() -> TractResult<()> {
        let mut model = TypedModel::default();
        let wire = tvec!(model.add_source("source", f32::fact(dims!(1, 10)))?);
        let wire = model.wire_node(
            "pad",
            Pad {
                pads: vec![(0, 0), (1, 0)],
                mode: ops::array::PadMode::Constant(rctensor0(0f32)),
            },
            &wire,
        )?;
        let kernel = model.add_const("kernel", rctensor3(&[[[1f32, 2f32]]]))?;
        let bias = model.add_const("bias", rctensor0(0f32))?;
        let wire = model.wire_node(
            "conv",
            Conv {
                pool_spec: PoolSpec {
                    data_format: crate::ops::nn::DataFormat::CHW,
                    dilations: None,
                    strides: None,
                    kernel_shape: tvec![2],
                    padding: Explicit(tvec![0], tvec![0]),
                    input_channels: 1,
                    output_channels: 1,
                },
                kernel_fmt: crate::ops::cnn::KernelFormat::OIHW,
                group: 1,
                q_params: None,
            },
            &[wire[0], kernel, bias],
        )?;
        model.set_output_outlets(&wire)?;
        model.declutter()?;
        assert_eq!(model.nodes().len(), 4); // source + conv + kernel + bias
        let cv = model.nodes()[3].op_as::<Conv>().unwrap();
        assert_eq!(cv.pool_spec.padding, Explicit(tvec![1], tvec![0]));
        Ok(())
    }
}