use std::fmt::Formatter;
use std::ops::Deref;

use tract_itertools::{izip, multiunzip};
use tract_linalg::block_quant::{BlockQuantFact, PackedBlockQuantFormat};
use tract_linalg::pack::PackedFormat;

use super::*;
use crate::ops::cast::cast;
use crate::ops::math::add;
use crate::ops::matmul::optimized::{
    AddMatMulGeometry, MapOutputAxisToInput, OptMatMul, ProtoFusedSpec,
};
use crate::ops::matmul::pack::{OptMatMulPack, OptSimpleMatMulPack};
use crate::ops::matmul::quant::{
    combine_scales, compensate_zero_points, requant, wire_ensure_q8_flavour,
};
use crate::ops::matmul::ModePicker;
use crate::ops::nn::{Reduce, Reducer};

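/// Run the rewrite rule that recognizes matmul-like [EinSum] nodes and retags
/// them as [EinSumMatMul].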
pub fn detect_all(model: &mut TypedModel) -> TractResult<()> {
    Rewriter::default().with_rule_for("detect-matmul-einsum", detect_rule).rewrite(&(), model)
}

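/// Run the rewrite rule that demotes every [EinSumMatMul] back to a plain
/// [EinSum].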
pub fn flatten_all(model: &mut TypedModel) -> TractResult<()> {
    Rewriter::default().with_rule_for("flatten-matmul-einsum", flatten_rule).rewrite(&(), model)
}

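/// An [EinSum] recognized as a matrix multiplication: the m, k and n axes are
/// recorded by label, along with their dimensions.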
#[derive(Clone, Hash, PartialEq)]
pub struct EinSumMatMul {
    pub op: EinSum,
    pub m_axis: char,
    pub k_axis: char,
    pub n_axis: char,
    pub m: TDim,
    pub k: TDim,
    pub n: TDim,
}

impl EinSumMatMul {
    pub fn m_axis(&self) -> &Axis {
        self.op.axes.axis(self.m_axis).unwrap()
    }
    pub fn k_axis(&self) -> &Axis {
        self.op.axes.axis(self.k_axis).unwrap()
    }
    pub fn n_axis(&self) -> &Axis {
        self.op.axes.axis(self.n_axis).unwrap()
    }
    pub fn a_m(&self) -> usize {
        self.m_axis().inputs[0][0]
    }
    pub fn a_k(&self) -> usize {
        self.k_axis().inputs[0][0]
    }
    pub fn b_k(&self) -> usize {
        self.k_axis().inputs[1][0]
    }
    pub fn b_n(&self) -> usize {
        self.n_axis().inputs[1][0]
    }
    pub fn c_m(&self) -> Option<usize> {
        self.m_axis().outputs[0].first().cloned()
    }
    pub fn c_n(&self) -> Option<usize> {
        self.n_axis().outputs[0].first().cloned()
    }

    fn new(
        op: EinSum,
        m_axis: char,
        k_axis: char,
        n_axis: char,
        m: TDim,
        k: TDim,
        n: TDim,
    ) -> Self {
        Self { op, m_axis, k_axis, n_axis, m, k, n }
    }
}

impl Debug for EinSumMatMul {
    fn fmt(&self, f: &mut Formatter) -> std::fmt::Result {
        write!(
            f,
            "EinSumMatMul: {} {:?} m: {}={}; k: {}={}; n: {}={}",
            self.op.axes,
            self.op.operating_dt,
            self.m_axis,
            self.m,
            self.k_axis,
            self.k,
            self.n_axis,
            self.n
        )
    }
}

impl Deref for EinSumMatMul {
    type Target = EinSum;
    fn deref(&self) -> &Self::Target {
        &self.op
    }
}

impl Op for EinSumMatMul {
    fn name(&self) -> StaticName {
        "EinSumMatMul".into()
    }

    op_as_typed_op!();
    impl_op_same_as!();
}

impl EvalOp for EinSumMatMul {
    fn is_stateless(&self) -> bool {
        true
    }
    fn eval_with_session(
        &self,
        node_id: usize,
        session: &SessionState,
        inputs: TVec<TValue>,
    ) -> TractResult<TVec<TValue>> {
        self.op.eval_with_session(node_id, session, inputs)
    }
}

impl TypedOp for EinSumMatMul {
    fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
        self.op.output_facts(inputs)
    }

    fn codegen(
        &self,
        model: &TypedModel,
        node: &TypedNode,
    ) -> TractResult<Option<TypedModelPatch>> {
        if node.inputs.len() == 9 {
            ensure!(self.op.q_params.is_some());
            return dequant(model, node, self).map(Some);
        }
        ensure!(node.inputs.len() == 2);
        let (a, b) = model.node_input_facts(node.id)?.into_iter().collect_tuple().unwrap();
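        // Decide whether the operands should be swapped: a block-quantized
        // operand must stay in slot 0, otherwise prefer (heuristically) the
        // orientation where m is the larger of the two free dimensions.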
        let must_transpose = if let Some(of) = a.opaque_fact() {
            ensure!(of.is::<BlockQuantFact>());
            false
        } else if let Some(of) = b.opaque_fact() {
            ensure!(of.is::<BlockQuantFact>());
            true
        } else if self.m == self.n {
            false
        } else {
            match (self.m.as_i64(), self.n.as_i64()) {
                (Some(m), Some(n)) => m < n,
                (None, Some(n)) => n >= 8,
                _ => (self.n.clone() - &self.m).prove_positive_or_zero(),
            }
        };
        if must_transpose {
            let mut op = self.clone();
            op.op.axes.iter_all_axes_mut().for_each(|axis| axis.inputs.swap(0, 1));
            std::mem::swap(&mut op.m_axis, &mut op.n_axis);
            std::mem::swap(&mut op.m, &mut op.n);
            return TypedModelPatch::replace_single_op(
                model,
                node,
                &[node.inputs[1], node.inputs[0]],
                op,
            )
            .map(Some);
        }
        if self.c_m().is_some() || self.c_n().is_some() {
            return optimized_mat_mul(model, node, self);
        }
        Ok(None)
    }

    as_op!();
}

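/// Recognize a matmul-like [EinSum]: a k axis contracted over both inputs, an
/// m axis from input 0 and an n axis from input 1. Trivial missing axes are
/// injected, multiple contraction axes are regrouped into a single k, and a
/// successful match is replaced by an [EinSumMatMul] node.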
pub(crate) fn detect_rule(
    _ctx: &(),
    model: &TypedModel,
    node: &TypedNode,
    _name: &str,
    op: &EinSum,
) -> TractResult<Option<TypedModelPatch>> {
    if node.inputs.len() != (2 + op.q_params.is_some() as usize * 7) {
        return Ok(None);
    }
    let input_facts = model.node_input_facts(node.id)?;
    let input_shapes = op.actual_input_shapes_from_facts(&input_facts)?;
    let output_shape = super::eval::output_shape(&op.axes, &input_shapes)?;
    let k_axes: TVec<&Axis> = op
        .axes
        .iter_all_axes()
        .filter(|a| a.inputs[0].len() == 1 && a.inputs[1].len() == 1 && a.outputs[0].is_empty())
        .collect();

    let non_trivial_k_axis = k_axes
        .iter()
        .filter(|a| {
            !input_shapes[0][a.inputs[0][0]].is_one() || !input_shapes[1][a.inputs[1][0]].is_one()
        })
        .copied()
        .collect::<TVec<_>>();

    let k_axis = if non_trivial_k_axis.len() > 1 {
        return regroup_k_axes(op, model, node, non_trivial_k_axis);
    } else {
        non_trivial_k_axis.first().or_else(|| k_axes.first()).copied()
    };
    let Some(k_axis) = k_axis else { return inject_k_axis(op, model, node).map(Some) };

    let mut possible_m_axes: Vec<_> = op
        .axes
        .iter_all_axes()
        .filter(|a| {
            a.inputs[0].len() == 1
                && (a.inputs[1].is_empty() || input_shapes[1][a.inputs[1][0]].is_one())
                && (a.outputs[0].len() == 1
                    || (input_shapes[0][a.inputs[0][0]].is_one() && a.inputs[1].is_empty()))
        })
        .collect();

    if possible_m_axes.iter().any(|a| !a.outputs[0].is_empty()) {
        possible_m_axes.retain(|a| !a.outputs[0].is_empty());
    }

    let m_axis = possible_m_axes
        .into_iter()
        .max_by_key(|a| input_shapes[0][a.inputs[0][0]].as_i64().unwrap_or(i64::MAX));

    let Some(m_axis) = m_axis else {
        return inject_m_or_n_axis(op, model, node, false).map(Some);
    };

    let n_axis = op
        .axes
        .iter_all_axes()
        .filter(|a| {
            (a.inputs[0].is_empty() || input_shapes[0][a.inputs[0][0]].is_one())
                && a.inputs[1].len() == 1
                && a.outputs[0].len() == 1
                && *a != m_axis
        })
        .max_by_key(|a| input_shapes[1][a.inputs[1][0]].as_i64().unwrap_or(i64::MAX));
    let Some(n_axis) = n_axis else {
        return inject_m_or_n_axis(op, model, node, true).map(Some);
    };
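    // Sanity check: an axis with a non-trivial dimension in exactly one of the
    // inputs must also appear in the output, otherwise this einsum hides a
    // broadcast or a reduction and is not a plain matmul.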
    for axis in op.axes.iter_all_axes() {
        let one = TDim::one();
        let in_left =
            axis.inputs[0].first().map(|pos| &input_shapes[0][*pos]).unwrap_or(&one) != &one;
        let in_right =
            axis.inputs[1].first().map(|pos| &input_shapes[1][*pos]).unwrap_or(&one) != &one;
        let in_out = axis.outputs[0].first().map(|pos| &output_shape[*pos]).unwrap_or(&one) != &one;
        if (in_left ^ in_right) && !in_out {
            return Ok(None);
        }
    }
    let m = input_shapes[0][m_axis.inputs[0][0]].clone();
    let k = input_shapes[0][k_axis.inputs[0][0]].clone();
    let n = input_shapes[1][n_axis.inputs[1][0]].clone();
    TypedModelPatch::replace_single_op(
        model,
        node,
        &node.inputs,
        EinSumMatMul::new(op.clone(), m_axis.repr, k_axis.repr, n_axis.repr, m, k, n),
    )
    .map(Some)
}

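/// No contraction axis was found: inject a trivial k axis of size one at
/// position 0 of both inputs.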
pub(super) fn inject_k_axis(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
) -> TractResult<TypedModelPatch> {
    let mut new_axes = op.axes.clone();
    let name = &node.name;
    let mut patch = TypedModelPatch::new("inject k axis");
    let mut wire = patch.taps(model, &node.inputs)?;
    let repr = new_axes.available_label();
    new_axes = new_axes.with_extra_axis(repr, InOut::In(0), 0)?.with_extra_axis_occurency(
        repr,
        InOut::In(1),
        0,
    )?;
    wire[0] = patch.wire_node(format!("{name}.add_k.0"), AxisOp::Add(0), &[wire[0]])?[0];
    wire[1] = patch.wire_node(format!("{name}.add_k.1"), AxisOp::Add(0), &[wire[1]])?[0];
    wire = patch.wire_node(&node.name, EinSum { axes: new_axes, ..op.clone() }, &wire)?;
    patch.shunt_outside(model, node.id.into(), wire[0])?;
    Ok(patch)
}

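/// Multiple non-trivial contraction axes: transpose each input if needed to
/// make the k axes contiguous, then reshape them into a single k axis.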
pub(super) fn regroup_k_axes(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
    mut k_axes: TVec<&Axis>,
) -> TractResult<Option<TypedModelPatch>> {
    let input_facts = model.node_input_facts(node.id)?;
    let input_shapes = op.actual_input_shapes_from_facts(&input_facts)?;
    let contig_in_a = k_axes
        .iter()
        .map(|axis| axis.inputs[0][0])
        .sorted()
        .tuple_windows()
        .all(|(a, b)| a + 1 == b);
    if contig_in_a {
        k_axes.sort_by_key(|ax| ax.inputs[0][0]);
    } else {
        k_axes.sort_by_key(|ax| ax.inputs[1][0]);
    }
    let k_dims: TVec<_> =
        k_axes.iter().map(|ax| input_shapes[0][ax.inputs[0][0]].clone()).collect();
    let k: TDim = k_dims.iter().product();
    let mut patch = TypedModelPatch::default();
    let mut wires = patch.taps(model, &node.inputs)?;
    let mut exprs: Vec<String> =
        (0..2).map(|slot| op.axes.axes(InOut::In(slot)).map(|ax| ax.repr).join("")).collect();
    for slot in 0..2 {
        if k_axes.iter().map(|ax| ax.inputs[slot][0]).tuple_windows().any(|(a, b)| a + 1 != b) {
            let after = op
                .axes
                .axes(InOut::In(slot))
                .filter(|ax| !k_axes.contains(ax))
                .chain(k_axes.iter().copied())
                .map(|ax| ax.repr)
                .join("");
            let transpose =
                AxesMapping::from_strs(&[&exprs[slot]], &[&after])?.translate_to_axis_ops()?;
            for (ix, op) in transpose.into_iter().enumerate() {
                wires[slot] = patch.wire_node(
                    format!("{}.transpose_input_{}.{}", &node.name, slot, ix),
                    op,
                    &[wires[slot]],
                )?[0];
            }
            exprs[slot] = after;
        }
        let pos = exprs[slot].chars().position(|c| k_axes[0].repr == c).unwrap();
        wires[slot] = patch.wire_node(
            format!("{}.fold_k_in_input_{}", &node.name, slot),
            AxisOp::Reshape(pos, k_dims.clone(), tvec!(k.clone())),
            &[wires[slot]],
        )?[0];
        exprs[slot] =
            exprs[slot].chars().filter(|c| !k_axes.iter().any(|k| k.repr == *c)).collect();
        exprs[slot].insert(pos, k_axes[0].repr);
    }
    let old = op.axes.to_string();
    let (iexpr, oexpr) = old.split_once("->").unwrap();
    let mut expr: String = exprs.iter().join(",");
    if node.inputs.len() > 2 {
        expr = expr + "," + &iexpr.split(",").skip(2).join(",");
    }
    expr = expr + "->" + oexpr;
    let wire = patch.wire_node(
        &node.name,
        EinSum { axes: expr.parse().unwrap(), ..op.clone() },
        &wires,
    )?[0];
    patch.shunt_outside(model, node.id.into(), wire)?;
    Ok(Some(patch))
}

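/// No m (respectively n) axis was found: inject a trivial one of size one in
/// the relevant input and in the output, then remove it from the final result.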
pub(super) fn inject_m_or_n_axis(
    op: &EinSum,
    model: &TypedModel,
    node: &TypedNode,
    is_n: bool,
) -> TractResult<TypedModelPatch> {
    let input_to_fix = is_n as usize;
    let label = if is_n { "n" } else { "m" };
    let name = &node.name;
    let mut patch = TypedModelPatch::new("Injecting m or n axis");
    let mut wire = patch.taps(model, &node.inputs)?;
    let repr = op.axes.available_label();
    let new_axes = op
        .axes
        .clone()
        .with_extra_axis(repr, InOut::In(input_to_fix), 0)?
        .with_extra_axis_occurency(repr, InOut::Out(0), 0)?;
    wire[input_to_fix] =
        patch.wire_node(format!("{name}.add_{label}"), AxisOp::Add(0), &[wire[input_to_fix]])?[0];
    wire = patch.wire_node(
        format!("{name}.einsum"),
        EinSum { axes: new_axes, ..op.clone() },
        &wire,
    )?;
    wire = patch.wire_node(&node.name, AxisOp::Rm(0), &wire)?;
    patch.shunt_outside(model, node.id.into(), wire[0])?;
    Ok(patch)
}

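/// Translate an axes mapping into the equivalent chain of [AxisOp] nodes,
/// wired in sequence from `outlet`.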
fn wire_axes_fix(
    patch: &mut TypedModelPatch,
    name: &str,
    var: &str,
    mapping: &AxesMapping,
    mut outlet: TVec<OutletId>,
) -> TractResult<TVec<OutletId>> {
    for (ix, axis_op) in mapping.translate_to_axis_ops()?.into_iter().enumerate() {
        outlet = patch.wire_node(format!("{name}.fix_{var}.{ix}"), axis_op, &outlet)?;
    }
    Ok(outlet)
}

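/// Rewrite a quantized einsum (nine inputs: a, b, bias, then zero-point and
/// scale pairs for a, b and c) as a plain integer einsum followed by bias
/// addition, zero-point compensation and requantization.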
fn dequant(
    model: &TypedModel,
    node: &TypedNode,
    op: &EinSumMatMul,
) -> TractResult<TypedModelPatch> {
    let name = &node.name;
    let mut patch = TypedModelPatch::new("Dequantizing einsum");

    let k_axis = op.k_axis();

    let mut taps = patch.taps(model, &node.inputs)?;
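    // Non-scalar a and b scales must broadcast against the output: pad them
    // with trailing axes of size one up to the output rank.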
    for ab in [0, 1] {
        let scale_input = 4 + ab * 2;
        if !patch.outlet_fact(taps[scale_input])?.shape.volume().is_one() {
            let q_axis_in_output = op.axes.axis((InOut::In(scale_input), 0))?.outputs[0][0];
            let output_rank = node.outputs[0].fact.rank();
            for i in 1..(output_rank - q_axis_in_output) {
                taps[scale_input] = patch.wire_node(
                    format!("{name}.scale_input{ab}_axis_fix_{i}"),
                    AxisOp::Add(i),
                    &[taps[scale_input]],
                )?[0];
            }
        }
    }

    let [mut a, mut b, bias, mut a0, a_scale, mut b0, b_scale, c0, c_scale] = *taps else {
        bail!("Expect exactly 9 inputs")
    };

    wire_ensure_q8_flavour(&mut patch, &node.name, &mut a, "a", &mut a0, i8::datum_type())?;
    wire_ensure_q8_flavour(&mut patch, &node.name, &mut b, "b", &mut b0, i8::datum_type())?;

    let mut output = patch.wire_node(
        &node.name,
        EinSum {
            q_params: None,
            axes: op.axes.extract_sub_mapping(&[0, 1], &[0])?,
            operating_dt: op.operating_dt,
        },
        &[a, b],
    )?;

    let a_i32 = patch.wire_node(format!("{name}.a_as_i32"), cast(i32::datum_type()), &[a])?[0];
    let b_i32 = patch.wire_node(format!("{name}.b_as_i32"), cast(i32::datum_type()), &[b])?[0];
    let sum_a = patch.wire_node(
        format!("{name}.sum_a"),
        Reduce::new(tvec!(k_axis.inputs[0][0]), Reducer::Sum),
        &[a_i32],
    )?;
    let sum_b = patch.wire_node(
        format!("{name}.sum_b"),
        Reduce::new(tvec!(k_axis.inputs[1][0]), Reducer::Sum),
        &[b_i32],
    )?;

    let sum_a =
        wire_axes_fix(&mut patch, name, "sum_a", &op.axes.extract_sub_mapping(&[0], &[0])?, sum_a)?;
    let sum_b =
        wire_axes_fix(&mut patch, name, "sum_b", &op.axes.extract_sub_mapping(&[1], &[0])?, sum_b)?;
    let bias = tvec!(bias);
    let bias =
        wire_axes_fix(&mut patch, name, "bias", &op.axes.extract_sub_mapping(&[2], &[0])?, bias)?;

    let abc_scale = combine_scales(&mut patch, name, a_scale, b_scale, c_scale)?;

    output = patch.wire_node(format!("{name}.add_bias"), add(), &[output[0], bias[0]])?;

    let k = model.outlet_fact(node.inputs[0])?.shape[k_axis.inputs[0][0]].clone();
    let output = compensate_zero_points(&mut patch, name, output[0], k, a0, b0, sum_a[0], sum_b[0])
        .context("Zero point compensation")?;
    let output = requant(&mut patch, name, output, op.q_params.unwrap(), abc_scale, c0)?;
    patch.shunt_outside(model, node.id.into(), output)?;
    Ok(patch)
}

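/// Rewrite rule undoing the detection: replace the [EinSumMatMul] node with
/// the [EinSum] it wraps.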
fn flatten_rule(
    _ctx: &(),
    model: &TypedModel,
    node: &TypedNode,
    _name: &str,
    op: &EinSumMatMul,
) -> TractResult<Option<TypedModelPatch>> {
    TypedModelPatch::replace_single_op(model, node, &node.inputs, op.op.clone()).map(Some)
}

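/// Lower the recognized matmul to an [OptMatMul] node: pick the kernels and
/// packing formats, wire packing nodes for both operands, then emit the
/// optimized node with its fused-op program.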
fn optimized_mat_mul(
    model: &TypedModel,
    node: &TypedNode,
    op: &EinSumMatMul,
) -> TractResult<Option<TypedModelPatch>> {
    let (mode_picker, left_pack, impls) = kernel_selection::strategize(model, node, op)?;
    let input_facts = model.node_input_facts(node.id)?;
    let input_shapes = op.actual_input_shapes_from_facts(&input_facts)?;
    let prefix = &node.name;

    let mut patch = TypedModelPatch::new("Einsum to OptMatMul");
    let taps = patch.taps(model, &node.inputs)?;
    let name = &node.name;

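    // A constant A is packed with the single format chosen by kernel
    // selection; a runtime A gets one packer per candidate kernel.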
    let pack_a: Box<dyn TypedOp> = if input_facts[0].konst.is_some() {
        if let Some(pf) = left_pack.downcast_ref::<PackedFormat>() {
            Box::new(OptMatMulPack {
                packers: vec![pf.clone()],
                mode_picker: ModePicker::Single,
                k_axis: op.a_k(),
                mn_axis: op.a_m(),
            })
        } else if let Some(packed_format) =
            left_pack.downcast_ref::<PackedBlockQuantFormat>().cloned()
        {
            Box::new(OptSimpleMatMulPack {
                packed_format,
                k: input_shapes[0][op.a_k()].to_usize().unwrap(),
                m: input_shapes[0][op.a_m()].to_usize().unwrap(),
            })
        } else {
            bail!("Unexpected static input format {left_pack:?}");
        }
    } else {
        Box::new(OptMatMulPack {
            packers: impls
                .iter()
                .map(|(mmm, p, pe)| {
                    pe.as_ref()
                        .map(|pe| &pe.from)
                        .unwrap_or(&mmm.packings()[*p].0)
                        .downcast_ref::<PackedFormat>()
                        .unwrap()
                        .clone()
                })
                .collect(),
            mode_picker: mode_picker.clone(),
            k_axis: op.a_k(),
            mn_axis: op.a_m(),
        })
    };
    let pa = patch.wire_node(format!("{prefix}.pack_a"), pack_a, &[taps[0]])?[0];

    let pb = patch.wire_node(
        format!("{prefix}.pack_b"),
        OptMatMulPack {
            k_axis: op.b_k(),
            mn_axis: op.b_n(),
            packers: impls
                .iter()
                .map(|(mmm, p, _)| {
                    mmm.packings()[*p].1.downcast_ref::<PackedFormat>().unwrap().clone()
                })
                .collect(),
            mode_picker: mode_picker.clone(),
        },
        &[taps[1]],
    )?[0];

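    // Map each remaining (batch) output axis back to its position in A and B,
    // compensating for the m/k (resp. n/k) axes consumed by packing.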
    let mut c_to_a_axis_mapping = tvec!();
    let mut c_to_b_axis_mapping = tvec!();
    for axis in op
        .op
        .axes
        .iter_all_axes()
        .filter(|&axis| ![op.m_axis, op.k_axis, op.n_axis].contains(&axis.repr))
    {
        if let (&[c], &[a]) = (&*axis.outputs[0], &*axis.inputs[0]) {
            if input_shapes[0][a] != 1.to_dim() {
                let a = a - (a > op.a_m()) as usize - (a > op.a_k()) as usize;
                c_to_a_axis_mapping.push((c, a));
            }
        }
        if let (&[c], &[b]) = (&*axis.outputs[0], &*axis.inputs[1]) {
            if input_shapes[1][b] != 1.to_dim() {
                let b = b - (b > op.b_n()) as usize - (b > op.b_k()) as usize;
                c_to_b_axis_mapping.push((c, b));
            }
        }
    }

    let c_fact = op.output_facts(&input_facts)?.remove(0);
    let geo = AddMatMulGeometry {
        k: op.k.clone(),
        c_to_a_axis_mapping: MapOutputAxisToInput(c_to_a_axis_mapping),
        c_to_b_axis_mapping: MapOutputAxisToInput(c_to_b_axis_mapping),
    };
    let (mmms, packings, extractor): (Vec<_>, Vec<_>, Vec<_>) = multiunzip(impls);
    let outputs = mmms.iter().map(|mmm| unsafe { mmm.c_view(op.c_m(), op.c_n()) }).collect();
    let trivial_packing = mmms.len() == 1
        && packings[0] == 0
        && extractor[0].is_none()
        && input_facts[0].opaque_fact.is_none();
    let opt = OptMatMul::new(
        mmms,
        mode_picker,
        c_fact,
        op.c_m(),
        op.c_n(),
        vec![
            ProtoFusedSpec::AddMatMul {
                geo,
                a: 0,
                b: 1,
                packings: izip!(packings, extractor).collect_vec(),
            },
            ProtoFusedSpec::Store(outputs),
        ],
        trivial_packing,
    )
    .context("Creating OptMatMul")?;
    let output = patch.wire_node(name, opt, &[pa, pb])?[0];
    patch.shunt_outside(model, node.id.into(), output)?;
    Ok(Some(patch))
}