//! tract_linalg/frame/mmm/scratch.rs
1use super::{FusedKerSpec, FusedSpec, MatMatMulKer, OutputStoreKer};
2use crate::{BinOp, LADatum};
3use downcast_rs::{impl_downcast, Downcast};
4use std::cell::RefCell;
5use std::fmt::Debug;
6use std::sync::atomic::AtomicUsize;
7use tract_data::internal::num_integer::Integer;
8use tract_data::internal::*;
9
/// Monotonic counter stamping each `ScratchSpaceImpl::prepare` call with a fresh
/// generation, so per-thread scratch copies can detect that they are stale.
static GENERATION: AtomicUsize = AtomicUsize::new(1);

thread_local! {
    // Per-thread working state (blob + kernel-spec vectors), lazily re-synced
    // against the shared ScratchSpaceImpl by comparing generation numbers.
    static TLS: RefCell<TLSScratch> = Default::default();
}
15
/// Thread-local mirror of a `ScratchSpaceImpl`: owns the scratch blob and the
/// kernel-spec vectors actually handed to the micro-kernels.
#[derive(Default, Debug)]
struct TLSScratch {
    // Generation of the ScratchSpaceImpl this scratch was last synced with.
    generation: usize,
    // Raw byte buffer holding location-dependant temporaries and panel buffers.
    blob: Blob,
    // One spec vector per accumulator width; ker_specs() transmutes to the one
    // matching TI. The 32-bit vector is shared by f32 and i32 accumulators.
    ker_specs_16: Vec<FusedKerSpec<f16>>,
    ker_specs_32: Vec<FusedKerSpec<f32>>,
    ker_specs_64: Vec<FusedKerSpec<f64>>,
}
24
impl TLSScratch {
    /// Returns the kernel-spec vector matching accumulator type `TI`.
    ///
    /// Specs are stored in monomorphic vectors keyed by element width; the
    /// transmute only reinterprets `Vec<FusedKerSpec<fXX>>` as
    /// `Vec<FusedKerSpec<TI>>` for a `TI` of the same size (f32 and i32 share
    /// the 32-bit vector).
    #[allow(unknown_lints, clippy::missing_transmute_annotations)]
    fn ker_specs<TI: LADatum>(&mut self) -> &mut Vec<FusedKerSpec<TI>> {
        unsafe {
            if TI::datum_type() == f32::datum_type() || TI::datum_type() == i32::datum_type() {
                std::mem::transmute(&mut self.ker_specs_32)
            } else if TI::datum_type() == f16::datum_type() {
                std::mem::transmute(&mut self.ker_specs_16)
            } else if TI::datum_type() == f64::datum_type() {
                std::mem::transmute(&mut self.ker_specs_64)
            } else {
                // No storage provisioned for other accumulator types yet.
                todo!();
            }
        }
    }

    /// Brings this thread-local scratch up to date with `scratch` when its
    /// generation changed: copies the kernel specs, (re)sizes the blob, and
    /// invalidates the cached panel ids of every AddMatMul temporary so panels
    /// are re-fetched on the next tile.
    fn sync<TI: LADatum>(&mut self, scratch: &ScratchSpaceImpl<TI>) {
        if self.generation == scratch.generation {
            return;
        }
        let ker_specs = self.ker_specs::<TI>();
        ker_specs.clear();
        ker_specs.extend_from_slice(&scratch.ker_specs);

        unsafe {
            self.blob.ensure_size_and_align(scratch.blob_size, scratch.blob_align);

            for LocDependant { loc, ker_spec, .. } in &scratch.loc_dependant {
                #[allow(clippy::single_match)]
                if matches!(scratch.ker_specs[*ker_spec], FusedKerSpec::AddMatMul { .. }) {
                    // usize::MAX acts as a sentinel: no panel cached yet.
                    let scratch = &mut *(self.blob.as_ptr().add(*loc) as *mut AddMatMulTemp);
                    scratch.panel_a_id = usize::MAX;
                    scratch.panel_b_id = usize::MAX;
                };
            }
        }
        self.generation = scratch.generation;
    }
}
64
/// Type-erased scratch-space handle; `Downcast` lets matmul implementations
/// recover the concrete `ScratchSpaceImpl<TI>` behind a `dyn ScratchSpace`.
pub trait ScratchSpace: Downcast + Send {}
impl_downcast!(ScratchSpace);
67
/// Shared, per-multiplication scratch description: the fused kernel-spec list
/// plus the layout (size, alignment, offsets) of the per-thread working blob.
#[derive(Debug, Default)]
pub struct ScratchSpaceImpl<TI: LADatum> {
    // Bumped by prepare(); lets each TLSScratch detect staleness.
    generation: usize,
    // Required byte size and alignment of the thread-local blob.
    blob_size: usize,
    blob_align: usize,
    // Kernel specs: Clear, then one entry per FusedSpec, then Done.
    ker_specs: Vec<FusedKerSpec<TI>>,
    // Specs whose kernel counterpart must be rebuilt for each tile position.
    loc_dependant: TVec<LocDependant>,
    // m / mr and n / nr tile decomposition, plus the partial-tile remainders.
    valid_down_tiles: usize,
    remnant_down: usize,
    valid_right_tiles: usize,
    remnant_right: usize,
}
80
/// Bookkeeping for one location-dependant `FusedSpec`: where it sits in the
/// spec lists and where its temporaries live inside the thread-local blob.
#[derive(Debug, new)]
struct LocDependant {
    // index of the originating entry in the FusedSpec slice
    spec: usize,
    // index of the matching entry in ker_specs
    ker_spec: usize,
    // blob offset for the location dependant structure
    loc: usize,
    // blob offsets of its associated dynamic-size panel buffers, when packing
    // needs scratch room (AddMatMul only)
    buffer_a: Option<usize>,
    buffer_b: Option<usize>,
}
91
impl<TI: LADatum> ScratchSpace for ScratchSpaceImpl<TI> {}
// SAFETY(review): FusedKerSpec values may embed raw pointers, which suppresses
// auto-Send; presumably those pointers target caller-owned tensors that are
// themselves Send and outlive the run — TODO confirm this invariant.
unsafe impl<TI: LADatum> Send for ScratchSpaceImpl<TI> {}
94
/// Cached packed-panel pointers for one AddMatMul spec. The panel ids let
/// consecutive tiles reuse an already-fetched panel instead of re-resolving it.
#[derive(Debug)]
struct AddMatMulTemp {
    ptr_a: *const u8,
    // id of the row panel ptr_a points at (usize::MAX = nothing cached)
    panel_a_id: usize,
    ptr_b: *const u8,
    // id of the column panel ptr_b points at (usize::MAX = nothing cached)
    panel_b_id: usize,
}
102
103impl<TI: LADatum> ScratchSpaceImpl<TI> {
    /// Builds the fused kernel-spec list and computes the scratch-blob layout
    /// for an `m`×`n` product run with kernel `ker`.
    ///
    /// Scalar-only specs are translated immediately into their kernel form.
    /// Location-dependant specs (per-row/per-col vectors, stores, AddMatMul)
    /// are pushed as `FusedKerSpec::Done` placeholders with a `LocDependant`
    /// entry recording their blob offsets; `for_valid_tile` /
    /// `for_border_tile` patch them per tile. The blob itself is allocated
    /// lazily per thread in `TLSScratch::sync`.
    pub unsafe fn prepare(
        &mut self,
        ker: &impl MatMatMulKer<Acc = TI>,
        m: usize,
        n: usize,
        specs: &[FusedSpec],
    ) -> TractResult<()> {
        use FusedKerSpec as FKS;
        use FusedSpec as FS;
        self.ker_specs.clear();
        self.loc_dependant.clear();
        // +2: leading Clear and trailing Done markers around the user specs.
        self.ker_specs.reserve(specs.len() + 2);
        self.ker_specs.push(FusedKerSpec::Clear);
        self.valid_down_tiles = m / ker.mr();
        self.remnant_down = m % ker.mr();
        self.valid_right_tiles = n / ker.nr();
        self.remnant_right = n % ker.nr();
        let mut offset = 0;
        // Blob alignment starts at pointer size and grows with panel needs.
        let mut align = std::mem::size_of::<*const ()>();
        // Shorthand for a LocDependant without panel buffers.
        fn ld(spec: usize, uspec: usize, loc: usize) -> LocDependant {
            LocDependant { spec, ker_spec: uspec, loc, buffer_a: None, buffer_b: None }
        }
        for (ix, spec) in specs.iter().enumerate() {
            // Keep each loc-dependant structure aligned within the blob.
            offset = offset.next_multiple_of(&align);
            let ker_spec = match spec {
                FS::BinScalar(t, op) => match op {
                    BinOp::Min => FKS::ScalarMin(*t.to_scalar()?),
                    BinOp::Max => FKS::ScalarMax(*t.to_scalar()?),
                    BinOp::Mul => FKS::ScalarMul(*t.to_scalar()?),
                    BinOp::Add => FKS::ScalarAdd(*t.to_scalar()?),
                    BinOp::Sub => FKS::ScalarSub(*t.to_scalar()?),
                    BinOp::SubF => FKS::ScalarSubF(*t.to_scalar()?),
                },
                FS::ShiftLeft(s) => FKS::ShiftLeft(*s),
                FS::RoundingShiftRight(s, rp) => FKS::RoundingShiftRight(*s, *rp),
                FS::QScale(s, rp, m) => FKS::QScale(*s, *rp, *m),
                // Placeholder + room for a full mr-lane staging buffer.
                FS::BinPerRow(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.mr();
                    FusedKerSpec::Done
                }
                // Placeholder + room for a full nr-lane staging buffer.
                FS::BinPerCol(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.nr();
                    FusedKerSpec::Done
                }
                // Placeholder + room for one mr row buffer and one nr col buffer.
                FS::AddRowColProducts(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * (ker.mr() + ker.nr());
                    FusedKerSpec::Done
                }
                // Placeholder + room for a full mr×nr staging tile.
                FS::AddUnicast(_) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.mr() * ker.nr();
                    FusedKerSpec::Done
                }
                // Placeholder + room for a full mr×nr output tile in the
                // store's item size.
                FS::Store(store) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += store.item_size * ker.mr() * ker.nr();
                    FusedKerSpec::Done
                }
                FS::LeakyRelu(t) => FKS::LeakyRelu(*t.to_scalar()?),
                FS::AddMatMul { a, b, packing } => {
                    // Reserve the temp struct, then (aligned) scratch panel
                    // buffers if either operand needs packing room.
                    let mut ld = ld(ix, self.ker_specs.len(), offset);
                    offset += std::mem::size_of::<AddMatMulTemp>();
                    if let Some(tmp) = a.scratch_panel_buffer_layout() {
                        align = tmp.align().lcm(&align);
                        offset = Integer::next_multiple_of(&offset, &tmp.align());
                        ld.buffer_a = Some(offset);
                        offset += tmp.size();
                    }
                    if let Some(tmp) = b.scratch_panel_buffer_layout() {
                        align = tmp.align().lcm(&align);
                        offset = Integer::next_multiple_of(&offset, &tmp.align());
                        ld.buffer_b = Some(offset);
                        offset += tmp.size();
                    }
                    self.loc_dependant.push(ld);
                    // Pointers are patched per tile; null until then.
                    FusedKerSpec::AddMatMul {
                        k: 0,
                        pa: std::ptr::null(),
                        pb: std::ptr::null(),
                        packing: *packing,
                    }
                }
            };
            self.ker_specs.push(ker_spec);
        }
        self.ker_specs.push(FKS::Done);
        self.blob_size = offset;
        self.blob_align = align;

        // New generation: forces every thread's TLSScratch to resync.
        self.generation = GENERATION.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        Ok(())
    }
199
    /// Computes one output tile at tile coordinates (`down`, `right`).
    ///
    /// Interior tiles take the fast path (`for_valid_tile`); border tiles
    /// stage partial inputs/outputs through blob temporaries
    /// (`for_border_tile`) and copy the staged store tile back afterwards
    /// (`postprocess_tile`).
    pub unsafe fn run(
        &self,
        ker: &impl MatMatMulKer<Acc = TI>,
        specs: &[FusedSpec],
        down: usize,
        right: usize,
    ) -> TractResult<()> {
        unsafe {
            TLS.with_borrow_mut(|tls| {
                // Refresh the thread-local state if prepare() ran since last use.
                tls.sync(self);
                if down < self.valid_down_tiles && right < self.valid_right_tiles {
                    self.for_valid_tile(ker, specs, tls, down, right)?;
                    let err = ker.kernel(tls.ker_specs());
                    debug_assert_eq!(err, 0, "Kernel return error {err}");
                } else {
                    // A border tile may still be full along one dimension.
                    let remnant_down =
                        if down < self.valid_down_tiles { ker.mr() } else { self.remnant_down };
                    let remnant_right =
                        if right < self.valid_right_tiles { ker.nr() } else { self.remnant_right };
                    self.for_border_tile(
                        ker,
                        specs,
                        tls,
                        down,
                        right,
                        remnant_down,
                        remnant_right,
                    )?;
                    let err = ker.kernel(tls.ker_specs());
                    debug_assert_eq!(err, 0, "Kernel return error {err}");
                    // Write the staged store tile into the real output buffer.
                    self.postprocess_tile(specs, tls, down, right, remnant_down, remnant_right)?;
                }
                Ok(())
            })
        }
    }
236
    /// Patches the thread-local kernel specs for a full interior tile: every
    /// location-dependant spec gets real pointers computed directly from the
    /// tile coordinates (no staging buffers needed, the whole mr×nr tile is
    /// in bounds).
    #[inline(always)]
    unsafe fn for_valid_tile(
        &self,
        ker: &impl MatMatMulKer<Acc = TI>,
        specs: &[FusedSpec],
        tls: &mut TLSScratch,
        down: usize,
        right: usize,
    ) -> TractResult<()> {
        unsafe {
            use FusedKerSpec as FKS;
            use FusedSpec as FS;
            let ScratchSpaceImpl { ker_specs, loc_dependant, .. } = self;
            // prepare() pushed Clear + one spec each + Done.
            debug_assert!(specs.len() + 2 == ker_specs.len());
            for LocDependant { spec, ker_spec, loc, buffer_a, buffer_b } in loc_dependant {
                let spec = specs.get_unchecked(*spec);
                let it = match spec {
                    FS::BinPerRow(v, op) => {
                        // Point straight into the user vector at this tile's rows.
                        let v = v.as_ptr_unchecked::<TI>().add(down * ker.mr());
                        match op {
                            BinOp::Min => FKS::PerRowMin(v),
                            BinOp::Max => FKS::PerRowMax(v),
                            BinOp::Add => FKS::PerRowAdd(v),
                            BinOp::Mul => FKS::PerRowMul(v),
                            BinOp::Sub => FKS::PerRowSub(v),
                            BinOp::SubF => FKS::PerRowSubF(v),
                        }
                    }
                    FS::BinPerCol(v, op) => {
                        // Point straight into the user vector at this tile's cols.
                        let v = v.as_ptr_unchecked::<TI>().add(right * ker.nr());
                        match op {
                            BinOp::Min => FKS::PerColMin(v),
                            BinOp::Max => FKS::PerColMax(v),
                            BinOp::Add => FKS::PerColAdd(v),
                            BinOp::Mul => FKS::PerColMul(v),
                            BinOp::Sub => FKS::PerColSub(v),
                            BinOp::SubF => FKS::PerColSubF(v),
                        }
                    }
                    FS::AddRowColProducts(rows, cols) => {
                        let row_ptr = rows.as_ptr_unchecked::<TI>().add(down * ker.mr());
                        let col_ptr = cols.as_ptr_unchecked::<TI>().add(right * ker.nr());
                        FKS::AddRowColProducts(row_ptr, col_ptr)
                    }
                    FS::AddUnicast(store) => FKS::AddUnicast(store.tile_c(down, right)),
                    FS::Store(c_store) => FKS::Store(c_store.tile_c(down, right)),
                    FS::AddMatMul { a, b, packing } => {
                        // Reuse cached panel pointers when the tile stays on the
                        // same row/col panel as the previous call.
                        let scratch = (tls.blob.as_mut_ptr().add(*loc) as *mut AddMatMulTemp)
                            .as_mut()
                            .unwrap();
                        if scratch.panel_a_id != down {
                            scratch.ptr_a = a.panel_bytes(
                                down,
                                buffer_a.map(|o| tls.blob.as_mut_ptr().add(o)),
                            )?;
                            scratch.panel_a_id = down;
                        }
                        if scratch.panel_b_id != right {
                            scratch.ptr_b = b.panel_bytes(
                                right,
                                buffer_b.map(|o| tls.blob.as_mut_ptr().add(o)),
                            )?;
                            scratch.panel_b_id = right;
                        }
                        FKS::AddMatMul {
                            k: b.k(),
                            pa: scratch.ptr_a,
                            pb: scratch.ptr_b,
                            packing: *packing,
                        }
                    }
                    // Only the variants above ever land in loc_dependant
                    // (see prepare()).
                    _ => std::hint::unreachable_unchecked(),
                };
                *tls.ker_specs().get_unchecked_mut(*ker_spec) = it;
            }
            Ok(())
        }
    }
315
316    #[inline(never)]
317    #[allow(clippy::too_many_arguments)]
318    unsafe fn for_border_tile(
319        &self,
320        ker: &impl MatMatMulKer<Acc = TI>,
321        specs: &[FusedSpec],
322        tls: &mut TLSScratch,
323        down: usize,
324        right: usize,
325        m_remnant: usize,
326        n_remnant: usize,
327    ) -> TractResult<()> {
328        unsafe {
329            use FusedKerSpec as FKS;
330            use FusedSpec as FS;
331            for LocDependant { spec, ker_spec: uspec, loc, buffer_a, buffer_b } in
332                &self.loc_dependant
333            {
334                let loc = tls.blob.as_mut_ptr().add(*loc);
335                let spec = specs.get_unchecked(*spec);
336                let it = match spec {
337                    FS::BinPerRow(v, op) => {
338                        let buf = std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr());
339                        let ptr = if m_remnant < ker.mr() {
340                            if m_remnant > 0 {
341                                buf.get_unchecked_mut(..m_remnant).copy_from_slice(
342                                    v.as_slice_unchecked()
343                                        .get_unchecked(down * ker.mr()..)
344                                        .get_unchecked(..m_remnant),
345                                );
346                            }
347                            if cfg!(debug_assertions) {
348                                buf.get_unchecked_mut(m_remnant..)
349                                    .iter_mut()
350                                    .for_each(|x| *x = TI::zero());
351                            }
352                            buf.as_ptr()
353                        } else {
354                            v.as_ptr_unchecked::<TI>().add(down * ker.mr())
355                        };
356                        match op {
357                            BinOp::Min => FKS::PerRowMin(ptr),
358                            BinOp::Max => FKS::PerRowMax(ptr),
359                            BinOp::Add => FKS::PerRowAdd(ptr),
360                            BinOp::Mul => FKS::PerRowMul(ptr),
361                            BinOp::Sub => FKS::PerRowSub(ptr),
362                            BinOp::SubF => FKS::PerRowSubF(ptr),
363                        }
364                    }
365                    FS::BinPerCol(v, op) => {
366                        let buf = std::slice::from_raw_parts_mut(loc as *mut TI, ker.nr());
367                        let ptr = if n_remnant < ker.nr() {
368                            if n_remnant > 0 {
369                                buf.get_unchecked_mut(..n_remnant).copy_from_slice(
370                                    v.as_slice_unchecked()
371                                        .get_unchecked(right * ker.nr()..)
372                                        .get_unchecked(..n_remnant),
373                                );
374                            }
375                            if cfg!(debug_assertions) {
376                                buf.get_unchecked_mut(n_remnant..)
377                                    .iter_mut()
378                                    .for_each(|x| *x = TI::zero());
379                            }
380                            buf.as_ptr()
381                        } else {
382                            v.as_ptr_unchecked::<TI>().add(right * ker.nr())
383                        };
384                        match op {
385                            BinOp::Min => FKS::PerColMin(ptr),
386                            BinOp::Max => FKS::PerColMax(ptr),
387                            BinOp::Add => FKS::PerColAdd(ptr),
388                            BinOp::Mul => FKS::PerColMul(ptr),
389                            BinOp::Sub => FKS::PerColSub(ptr),
390                            BinOp::SubF => FKS::PerColSubF(ptr),
391                        }
392                    }
393                    FS::AddRowColProducts(rows, cols) => {
394                        let r = std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr());
395                        let row_ptr = if m_remnant < ker.mr() {
396                            r.get_unchecked_mut(..m_remnant).copy_from_slice(
397                                rows.as_slice_unchecked()
398                                    .get_unchecked(down * ker.mr()..)
399                                    .get_unchecked(..m_remnant),
400                            );
401                            if cfg!(debug_assertions) {
402                                r.get_unchecked_mut(m_remnant..)
403                                    .iter_mut()
404                                    .for_each(|x| *x = TI::zero());
405                            }
406                            r.as_ptr()
407                        } else {
408                            rows.as_ptr_unchecked::<TI>().add(down * ker.mr())
409                        };
410                        let c = std::slice::from_raw_parts_mut(
411                            (loc as *mut TI).add(ker.mr()),
412                            ker.nr(),
413                        );
414                        let col_ptr = if n_remnant < ker.nr() {
415                            c.get_unchecked_mut(..n_remnant).copy_from_slice(
416                                cols.as_slice_unchecked()
417                                    .get_unchecked(right * ker.nr()..)
418                                    .get_unchecked(..n_remnant),
419                            );
420                            if cfg!(debug_assertions) {
421                                r.get_unchecked_mut(n_remnant..)
422                                    .iter_mut()
423                                    .for_each(|x| *x = TI::zero());
424                            }
425                            c.as_ptr()
426                        } else {
427                            cols.as_ptr_unchecked::<TI>().add(right * ker.nr())
428                        };
429                        FKS::AddRowColProducts(row_ptr, col_ptr)
430                    }
431                    FS::AddUnicast(store) => {
432                        let row_byte_stride = store.row_byte_stride;
433                        let col_byte_stride = store.col_byte_stride;
434                        let tile_offset = row_byte_stride * down as isize * ker.mr() as isize
435                            + col_byte_stride * right as isize * ker.nr() as isize;
436                        let tile_ptr = store.ptr.offset(tile_offset);
437                        let tmp_d_tile =
438                            std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr() * ker.nr());
439                        if cfg!(debug_assertions) {
440                            tmp_d_tile.iter_mut().for_each(|t| *t = TI::zero());
441                        }
442                        for r in 0..m_remnant as isize {
443                            for c in 0..n_remnant as isize {
444                                let inner_offset = c * col_byte_stride + r * row_byte_stride;
445                                if inner_offset + tile_offset
446                                    < (store.item_size * store.item_count) as isize
447                                {
448                                    *tmp_d_tile
449                                        .get_unchecked_mut(r as usize + c as usize * ker.mr()) =
450                                        *(tile_ptr.offset(inner_offset) as *const TI);
451                                }
452                            }
453                        }
454                        FKS::AddUnicast(OutputStoreKer {
455                            ptr: tmp_d_tile.as_ptr() as _,
456                            row_byte_stride: std::mem::size_of::<TI>() as isize,
457                            col_byte_stride: (std::mem::size_of::<TI>() * ker.mr()) as isize,
458                            item_size: std::mem::size_of::<TI>(),
459                        })
460                    }
461                    FS::Store(c_store) => {
462                        let tmpc = OutputStoreKer {
463                            ptr: loc as _,
464                            item_size: c_store.item_size,
465                            row_byte_stride: c_store.item_size as isize,
466                            col_byte_stride: (c_store.item_size * ker.mr()) as isize,
467                        };
468                        FKS::Store(tmpc)
469                    }
470                    FS::AddMatMul { a, b, packing } => {
471                        let scratch = (loc as *mut AddMatMulTemp).as_mut().unwrap();
472                        if scratch.panel_a_id != down {
473                            scratch.ptr_a = a.panel_bytes(
474                                down,
475                                buffer_a.map(|o| tls.blob.as_mut_ptr().add(o)),
476                            )?;
477                            scratch.panel_a_id = down;
478                        }
479                        if scratch.panel_b_id != right {
480                            scratch.ptr_b = b.panel_bytes(
481                                right,
482                                buffer_b.map(|o| tls.blob.as_mut_ptr().add(o)),
483                            )?;
484                            scratch.panel_b_id = right;
485                        }
486                        FKS::AddMatMul {
487                            k: b.k(),
488                            pa: scratch.ptr_a,
489                            pb: scratch.ptr_b,
490                            packing: *packing,
491                        }
492                    }
493                    _ => std::hint::unreachable_unchecked(),
494                };
495                *tls.ker_specs().get_unchecked_mut(*uspec) = it;
496            }
497            Ok(())
498        }
499    }
500
501    #[inline]
502    pub fn uspecs(&self) -> &[FusedKerSpec<TI>] {
503        &self.ker_specs
504    }
505
506    unsafe fn postprocess_tile(
507        &self,
508        specs: &[FusedSpec],
509        tls: &mut TLSScratch,
510        down: usize,
511        right: usize,
512        m_remnant: usize,
513        n_remnant: usize,
514    ) -> TractResult<()>
515    where
516        TI: LADatum,
517    {
518        unsafe {
519            for LocDependant { spec, ker_spec: uspec, .. } in self.loc_dependant.iter() {
520                let spec = specs.get_unchecked(*spec);
521                let ker_spec = tls.ker_specs::<TI>().get_unchecked(*uspec);
522                if let (FusedSpec::Store(c_store), FusedKerSpec::Store(tmp)) = (spec, ker_spec) {
523                    c_store.set_from_tile(down, right, m_remnant, n_remnant, tmp)
524                }
525            }
526            Ok(())
527        }
528    }
529}