use super::{FusedKerSpec, FusedSpec, MatMatMulKer, OutputStoreKer};
use crate::{BinOp, LADatum};
use downcast_rs::{impl_downcast, Downcast};
use std::cell::RefCell;
use std::fmt::Debug;
use std::sync::atomic::AtomicUsize;
use tract_data::internal::num_integer::Integer;
use tract_data::internal::*;

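/// Global monotonic counter. Each `prepare()` call takes a fresh value as the
/// scratch space's generation; thread-local state compares its own stored
/// generation against it to detect staleness (see `TLSScratch::sync`).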
static GENERATION: AtomicUsize = AtomicUsize::new(1);

thread_local! {
    static TLS: RefCell<TLSScratch> = Default::default();
}

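/// Per-thread mirror of a `ScratchSpaceImpl`: a private copy of the fused
/// kernel micro-program plus a working blob for location-dependent operators.
/// One spec vector per accumulator width lets the same thread serve f16,
/// f32/i32 and f64 kernels without reallocating.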
#[derive(Default, Debug)]
struct TLSScratch {
    generation: usize,
    blob: Blob,
    ker_specs_16: Vec<FusedKerSpec<f16>>,
    ker_specs_32: Vec<FusedKerSpec<f32>>,
    ker_specs_64: Vec<FusedKerSpec<f64>>,
}

impl TLSScratch {
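    /// Returns the spec vector matching `TI`'s accumulator width. The
    /// transmutes below rely on `FusedKerSpec<T>` having the same layout for
    /// same-sized `T`s, which is also why f32 and i32 share `ker_specs_32`.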
    #[allow(unknown_lints, clippy::missing_transmute_annotations)]
    fn ker_specs<TI: LADatum>(&mut self) -> &mut Vec<FusedKerSpec<TI>> {
        unsafe {
            if TI::datum_type() == f32::datum_type() || TI::datum_type() == i32::datum_type() {
                std::mem::transmute(&mut self.ker_specs_32)
            } else if TI::datum_type() == f16::datum_type() {
                std::mem::transmute(&mut self.ker_specs_16)
            } else if TI::datum_type() == f64::datum_type() {
                std::mem::transmute(&mut self.ker_specs_64)
            } else {
                todo!();
            }
        }
    }

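    /// Brings this thread's state up to date with `scratch` if it was
    /// re-`prepare()`d since the last call: recopies the kernel spec list,
    /// regrows the blob, and resets cached `AddMatMul` panel ids so stale
    /// pointers into the old blob can never be reused.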
    fn sync<TI: LADatum>(&mut self, scratch: &ScratchSpaceImpl<TI>) {
        if self.generation == scratch.generation {
            return;
        }
        let ker_specs = self.ker_specs::<TI>();
        ker_specs.clear();
        ker_specs.extend_from_slice(&scratch.ker_specs);

        unsafe {
            self.blob.ensure_size_and_align(scratch.blob_size, scratch.blob_align);

            for LocDependant { loc, ker_spec, .. } in &scratch.loc_dependant {
                #[allow(clippy::single_match)]
                if matches!(scratch.ker_specs[*ker_spec], FusedKerSpec::AddMatMul { .. }) {
                    let scratch = &mut *(self.blob.as_ptr().add(*loc) as *mut AddMatMulTemp);
                    scratch.panel_a_id = usize::MAX;
                    scratch.panel_b_id = usize::MAX;
                }
            }
        }
        self.generation = scratch.generation;
    }
}

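/// Type-erased scratch space. Callers that know the accumulator type can
/// downcast it back to a concrete `ScratchSpaceImpl<TI>`.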
pub trait ScratchSpace: Downcast + Send {}
impl_downcast!(ScratchSpace);

#[derive(Debug, Default)]
pub struct ScratchSpaceImpl<TI: LADatum> {
    generation: usize,
    blob_size: usize,
    blob_align: usize,
    ker_specs: Vec<FusedKerSpec<TI>>,
    loc_dependant: TVec<LocDependant>,
    valid_down_tiles: usize,
    remnant_down: usize,
    valid_right_tiles: usize,
    remnant_right: usize,
}

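/// Book-keeping for a `FusedSpec` whose kernel form depends on the current
/// tile: `spec` and `ker_spec` index into the spec lists, `loc` is the
/// operator's offset inside the scratch blob, and `buffer_a`/`buffer_b` are
/// blob offsets reserved for on-the-fly panel packing, when needed.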
#[derive(Debug, new)]
struct LocDependant {
    spec: usize,
    ker_spec: usize,
    loc: usize,
    buffer_a: Option<usize>,
    buffer_b: Option<usize>,
}

impl<TI: LADatum> ScratchSpace for ScratchSpaceImpl<TI> {}
unsafe impl<TI: LADatum> Send for ScratchSpaceImpl<TI> {}

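/// Panel pointer cache for one `AddMatMul` operator. The `*_id` fields record
/// which row/column panel the pointers refer to, so consecutive tiles along
/// the same row or column skip the `panel_bytes` lookup.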
#[derive(Debug)]
struct AddMatMulTemp {
    ptr_a: *const u8,
    panel_a_id: usize,
    ptr_b: *const u8,
    panel_b_id: usize,
}

impl<TI: LADatum> ScratchSpaceImpl<TI> {
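    /// Lays out the fused micro-program and the scratch blob for an `m`×`n`
    /// output computed with kernel `ker`. Position-independent specs are
    /// translated to their `FusedKerSpec` form immediately; position-dependent
    /// ones are recorded in `loc_dependant` with a `FusedKerSpec::Done`
    /// placeholder that `run()` patches for each tile.
    ///
    /// A minimal calling sketch (hypothetical `ker`, `a`, `b` and `c_store`
    /// values, not a complete program):
    ///
    /// ```ignore
    /// let mut scratch = ScratchSpaceImpl::<f32>::default();
    /// let specs = [FusedSpec::AddMatMul { a, b, packing: 0 }, FusedSpec::Store(c_store)];
    /// unsafe {
    ///     scratch.prepare(&ker, m, n, &specs)?;
    ///     // one call per (mr, nr) tile of the output
    ///     for down in 0..m.div_ceil(ker.mr()) {
    ///         for right in 0..n.div_ceil(ker.nr()) {
    ///             scratch.run(&ker, &specs, down, right)?;
    ///         }
    ///     }
    /// }
    /// ```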
    pub unsafe fn prepare(
        &mut self,
        ker: &impl MatMatMulKer<Acc = TI>,
        m: usize,
        n: usize,
        specs: &[FusedSpec],
    ) -> TractResult<()> {
        use FusedKerSpec as FKS;
        use FusedSpec as FS;
        self.ker_specs.clear();
        self.loc_dependant.clear();
        self.ker_specs.reserve(specs.len() + 2);
        self.ker_specs.push(FusedKerSpec::Clear);
        self.valid_down_tiles = m / ker.mr();
        self.remnant_down = m % ker.mr();
        self.valid_right_tiles = n / ker.nr();
        self.remnant_right = n % ker.nr();
        let mut offset = 0;
        let mut align = std::mem::size_of::<*const ()>();
        fn ld(spec: usize, uspec: usize, loc: usize) -> LocDependant {
            LocDependant { spec, ker_spec: uspec, loc, buffer_a: None, buffer_b: None }
        }
        for (ix, spec) in specs.iter().enumerate() {
            offset = offset.next_multiple_of(&align);
            let ker_spec = match spec {
                FS::BinScalar(t, op) => match op {
                    BinOp::Min => FKS::ScalarMin(*t.to_scalar()?),
                    BinOp::Max => FKS::ScalarMax(*t.to_scalar()?),
                    BinOp::Mul => FKS::ScalarMul(*t.to_scalar()?),
                    BinOp::Add => FKS::ScalarAdd(*t.to_scalar()?),
                    BinOp::Sub => FKS::ScalarSub(*t.to_scalar()?),
                    BinOp::SubF => FKS::ScalarSubF(*t.to_scalar()?),
                },
                FS::ShiftLeft(s) => FKS::ShiftLeft(*s),
                FS::RoundingShiftRight(s, rp) => FKS::RoundingShiftRight(*s, *rp),
                FS::QScale(s, rp, m) => FKS::QScale(*s, *rp, *m),
                FS::BinPerRow(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.mr();
                    FusedKerSpec::Done
                }
                FS::BinPerCol(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.nr();
                    FusedKerSpec::Done
                }
                FS::AddRowColProducts(_, _) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * (ker.mr() + ker.nr());
                    FusedKerSpec::Done
                }
                FS::AddUnicast(_) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += TI::datum_type().size_of() * ker.mr() * ker.nr();
                    FusedKerSpec::Done
                }
                FS::Store(store) => {
                    self.loc_dependant.push(ld(ix, self.ker_specs.len(), offset));
                    offset += store.item_size * ker.mr() * ker.nr();
                    FusedKerSpec::Done
                }
                FS::LeakyRelu(t) => FKS::LeakyRelu(*t.to_scalar()?),
                FS::AddMatMul { a, b, packing } => {
                    let mut ld = ld(ix, self.ker_specs.len(), offset);
                    offset += std::mem::size_of::<AddMatMulTemp>();
                    if let Some(tmp) = a.scratch_panel_buffer_layout() {
                        align = tmp.align().lcm(&align);
                        offset = Integer::next_multiple_of(&offset, &tmp.align());
                        ld.buffer_a = Some(offset);
                        offset += tmp.size();
                    }
                    if let Some(tmp) = b.scratch_panel_buffer_layout() {
                        align = tmp.align().lcm(&align);
                        offset = Integer::next_multiple_of(&offset, &tmp.align());
                        ld.buffer_b = Some(offset);
                        offset += tmp.size();
                    }
                    self.loc_dependant.push(ld);
                    FusedKerSpec::AddMatMul {
                        k: 0,
                        pa: std::ptr::null(),
                        pb: std::ptr::null(),
                        packing: *packing,
                    }
                }
            };
            self.ker_specs.push(ker_spec);
        }
        self.ker_specs.push(FKS::Done);
        self.blob_size = offset;
        self.blob_align = align;

        self.generation = GENERATION.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        Ok(())
    }

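    /// Computes one `mr`×`nr` output tile at tile coordinates (`down`,
    /// `right`). Interior tiles run the kernel in place; border tiles are
    /// staged through scratch buffers and copied back out by
    /// `postprocess_tile`.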
    pub unsafe fn run(
        &self,
        ker: &impl MatMatMulKer<Acc = TI>,
        specs: &[FusedSpec],
        down: usize,
        right: usize,
    ) -> TractResult<()> {
        TLS.with_borrow_mut(|tls| {
            tls.sync(self);
            if down < self.valid_down_tiles && right < self.valid_right_tiles {
                self.for_valid_tile(ker, specs, tls, down, right)?;
                let err = ker.kernel(tls.ker_specs());
                debug_assert_eq!(err, 0, "Kernel returned error {err}");
            } else {
                let remnant_down =
                    if down < self.valid_down_tiles { ker.mr() } else { self.remnant_down };
                let remnant_right =
                    if right < self.valid_right_tiles { ker.nr() } else { self.remnant_right };
                self.for_border_tile(ker, specs, tls, down, right, remnant_down, remnant_right)?;
                let err = ker.kernel(tls.ker_specs());
                debug_assert_eq!(err, 0, "Kernel returned error {err}");
                self.postprocess_tile(specs, tls, down, right, remnant_down, remnant_right)?;
            }
            Ok(())
        })
    }

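    /// Patches the thread-local kernel specs for a full interior tile: every
    /// location-dependent spec gets its pointers offset to the current tile,
    /// and `AddMatMul` panels are looked up only when the tile row or column
    /// actually changed.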
    #[inline(always)]
    unsafe fn for_valid_tile(
        &self,
        ker: &impl MatMatMulKer<Acc = TI>,
        specs: &[FusedSpec],
        tls: &mut TLSScratch,
        down: usize,
        right: usize,
    ) -> TractResult<()> {
        use FusedKerSpec as FKS;
        use FusedSpec as FS;
        let ScratchSpaceImpl { ker_specs, loc_dependant, .. } = self;
        debug_assert!(specs.len() + 2 == ker_specs.len());
        for LocDependant { spec, ker_spec, loc, buffer_a, buffer_b } in loc_dependant {
            let spec = specs.get_unchecked(*spec);
            let it = match spec {
                FS::BinPerRow(v, op) => {
                    let v = v.as_ptr_unchecked::<TI>().add(down * ker.mr());
                    match op {
                        BinOp::Min => FKS::PerRowMin(v),
                        BinOp::Max => FKS::PerRowMax(v),
                        BinOp::Add => FKS::PerRowAdd(v),
                        BinOp::Mul => FKS::PerRowMul(v),
                        BinOp::Sub => FKS::PerRowSub(v),
                        BinOp::SubF => FKS::PerRowSubF(v),
                    }
                }
                FS::BinPerCol(v, op) => {
                    let v = v.as_ptr_unchecked::<TI>().add(right * ker.nr());
                    match op {
                        BinOp::Min => FKS::PerColMin(v),
                        BinOp::Max => FKS::PerColMax(v),
                        BinOp::Add => FKS::PerColAdd(v),
                        BinOp::Mul => FKS::PerColMul(v),
                        BinOp::Sub => FKS::PerColSub(v),
                        BinOp::SubF => FKS::PerColSubF(v),
                    }
                }
                FS::AddRowColProducts(rows, cols) => {
                    let row_ptr = rows.as_ptr_unchecked::<TI>().add(down * ker.mr());
                    let col_ptr = cols.as_ptr_unchecked::<TI>().add(right * ker.nr());
                    FKS::AddRowColProducts(row_ptr, col_ptr)
                }
                FS::AddUnicast(store) => FKS::AddUnicast(store.tile_c(down, right)),
                FS::Store(c_store) => FKS::Store(c_store.tile_c(down, right)),
                FS::AddMatMul { a, b, packing } => {
                    let scratch =
                        (tls.blob.as_mut_ptr().add(*loc) as *mut AddMatMulTemp).as_mut().unwrap();
                    if scratch.panel_a_id != down {
                        scratch.ptr_a =
                            a.panel_bytes(down, buffer_a.map(|o| tls.blob.as_mut_ptr().add(o)))?;
                        scratch.panel_a_id = down;
                    }
                    if scratch.panel_b_id != right {
                        scratch.ptr_b =
                            b.panel_bytes(right, buffer_b.map(|o| tls.blob.as_mut_ptr().add(o)))?;
                        scratch.panel_b_id = right;
                    }
                    FKS::AddMatMul {
                        k: b.k(),
                        pa: scratch.ptr_a,
                        pb: scratch.ptr_b,
                        packing: *packing,
                    }
                }
                _ => std::hint::unreachable_unchecked(),
            };
            *tls.ker_specs().get_unchecked_mut(*ker_spec) = it;
        }
        Ok(())
    }

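    /// Counterpart of `for_valid_tile` for tiles spilling past the right or
    /// bottom edge: partial row/column vectors are copied into staging
    /// buffers (zero-padded in debug builds), and `Store`/`AddUnicast` are
    /// redirected through a dense temporary tile inside the blob.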
    #[inline(never)]
    #[allow(clippy::too_many_arguments)]
    unsafe fn for_border_tile(
        &self,
        ker: &impl MatMatMulKer<Acc = TI>,
        specs: &[FusedSpec],
        tls: &mut TLSScratch,
        down: usize,
        right: usize,
        m_remnant: usize,
        n_remnant: usize,
    ) -> TractResult<()> {
        use FusedKerSpec as FKS;
        use FusedSpec as FS;
        for LocDependant { spec, ker_spec: uspec, loc, buffer_a, buffer_b } in &self.loc_dependant {
            let loc = tls.blob.as_mut_ptr().add(*loc);
            let spec = specs.get_unchecked(*spec);
            let it = match spec {
                FS::BinPerRow(v, op) => {
                    let buf = std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr());
                    let ptr = if m_remnant < ker.mr() {
                        if m_remnant > 0 {
                            buf.get_unchecked_mut(..m_remnant).copy_from_slice(
                                v.as_slice_unchecked()
                                    .get_unchecked(down * ker.mr()..)
                                    .get_unchecked(..m_remnant),
                            );
                        }
                        if cfg!(debug_assertions) {
                            buf.get_unchecked_mut(m_remnant..)
                                .iter_mut()
                                .for_each(|x| *x = TI::zero());
                        }
                        buf.as_ptr()
                    } else {
                        v.as_ptr_unchecked::<TI>().add(down * ker.mr())
                    };
                    match op {
                        BinOp::Min => FKS::PerRowMin(ptr),
                        BinOp::Max => FKS::PerRowMax(ptr),
                        BinOp::Add => FKS::PerRowAdd(ptr),
                        BinOp::Mul => FKS::PerRowMul(ptr),
                        BinOp::Sub => FKS::PerRowSub(ptr),
                        BinOp::SubF => FKS::PerRowSubF(ptr),
                    }
                }
                FS::BinPerCol(v, op) => {
                    let buf = std::slice::from_raw_parts_mut(loc as *mut TI, ker.nr());
                    let ptr = if n_remnant < ker.nr() {
                        if n_remnant > 0 {
                            buf.get_unchecked_mut(..n_remnant).copy_from_slice(
                                v.as_slice_unchecked()
                                    .get_unchecked(right * ker.nr()..)
                                    .get_unchecked(..n_remnant),
                            );
                        }
                        if cfg!(debug_assertions) {
                            buf.get_unchecked_mut(n_remnant..)
                                .iter_mut()
                                .for_each(|x| *x = TI::zero());
                        }
                        buf.as_ptr()
                    } else {
                        v.as_ptr_unchecked::<TI>().add(right * ker.nr())
                    };
                    match op {
                        BinOp::Min => FKS::PerColMin(ptr),
                        BinOp::Max => FKS::PerColMax(ptr),
                        BinOp::Add => FKS::PerColAdd(ptr),
                        BinOp::Mul => FKS::PerColMul(ptr),
                        BinOp::Sub => FKS::PerColSub(ptr),
                        BinOp::SubF => FKS::PerColSubF(ptr),
                    }
                }
                FS::AddRowColProducts(rows, cols) => {
                    let r = std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr());
                    let row_ptr = if m_remnant < ker.mr() {
                        r.get_unchecked_mut(..m_remnant).copy_from_slice(
                            rows.as_slice_unchecked()
                                .get_unchecked(down * ker.mr()..)
                                .get_unchecked(..m_remnant),
                        );
                        if cfg!(debug_assertions) {
                            r.get_unchecked_mut(m_remnant..)
                                .iter_mut()
                                .for_each(|x| *x = TI::zero());
                        }
                        r.as_ptr()
                    } else {
                        rows.as_ptr_unchecked::<TI>().add(down * ker.mr())
                    };
                    let c =
                        std::slice::from_raw_parts_mut((loc as *mut TI).add(ker.mr()), ker.nr());
                    let col_ptr = if n_remnant < ker.nr() {
                        c.get_unchecked_mut(..n_remnant).copy_from_slice(
                            cols.as_slice_unchecked()
                                .get_unchecked(right * ker.nr()..)
                                .get_unchecked(..n_remnant),
                        );
                        if cfg!(debug_assertions) {
                            c.get_unchecked_mut(n_remnant..)
                                .iter_mut()
                                .for_each(|x| *x = TI::zero());
                        }
                        c.as_ptr()
                    } else {
                        cols.as_ptr_unchecked::<TI>().add(right * ker.nr())
                    };
                    FKS::AddRowColProducts(row_ptr, col_ptr)
                }
                FS::AddUnicast(store) => {
                    let row_byte_stride = store.row_byte_stride;
                    let col_byte_stride = store.col_byte_stride;
                    let tile_offset = row_byte_stride * down as isize * ker.mr() as isize
                        + col_byte_stride * right as isize * ker.nr() as isize;
                    let tile_ptr = store.ptr.offset(tile_offset);
                    let tmp_d_tile =
                        std::slice::from_raw_parts_mut(loc as *mut TI, ker.mr() * ker.nr());
                    if cfg!(debug_assertions) {
                        tmp_d_tile.iter_mut().for_each(|t| *t = TI::zero());
                    }
                    for r in 0..m_remnant as isize {
                        for c in 0..n_remnant as isize {
                            let inner_offset = c * col_byte_stride + r * row_byte_stride;
                            if inner_offset + tile_offset
                                < (store.item_size * store.item_count) as isize
                            {
                                *tmp_d_tile.get_unchecked_mut(r as usize + c as usize * ker.mr()) =
                                    *(tile_ptr.offset(inner_offset) as *const TI);
                            }
                        }
                    }
                    FKS::AddUnicast(OutputStoreKer {
                        ptr: tmp_d_tile.as_ptr() as _,
                        row_byte_stride: std::mem::size_of::<TI>() as isize,
                        col_byte_stride: (std::mem::size_of::<TI>() * ker.mr()) as isize,
                        item_size: std::mem::size_of::<TI>(),
                    })
                }
                FS::Store(c_store) => {
                    let tmpc = OutputStoreKer {
                        ptr: loc as _,
                        item_size: c_store.item_size,
                        row_byte_stride: c_store.item_size as isize,
                        col_byte_stride: (c_store.item_size * ker.mr()) as isize,
                    };
                    FKS::Store(tmpc)
                }
                FS::AddMatMul { a, b, packing } => {
                    let scratch = (loc as *mut AddMatMulTemp).as_mut().unwrap();
                    if scratch.panel_a_id != down {
                        scratch.ptr_a =
                            a.panel_bytes(down, buffer_a.map(|o| tls.blob.as_mut_ptr().add(o)))?;
                        scratch.panel_a_id = down;
                    }
                    if scratch.panel_b_id != right {
                        scratch.ptr_b =
                            b.panel_bytes(right, buffer_b.map(|o| tls.blob.as_mut_ptr().add(o)))?;
                        scratch.panel_b_id = right;
                    }
                    FKS::AddMatMul {
                        k: b.k(),
                        pa: scratch.ptr_a,
                        pb: scratch.ptr_b,
                        packing: *packing,
                    }
                }
                _ => std::hint::unreachable_unchecked(),
            };
            *tls.ker_specs().get_unchecked_mut(*uspec) = it;
        }
        Ok(())
    }

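    /// Read-only view of the prepared kernel specs, placeholders included.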
    #[inline]
    pub fn uspecs(&self) -> &[FusedKerSpec<TI>] {
        &self.ker_specs
    }

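    /// After a border tile ran into its staging buffer, copies the valid
    /// `m_remnant`×`n_remnant` region of the temporary tile into the real
    /// output store.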
    unsafe fn postprocess_tile(
        &self,
        specs: &[FusedSpec],
        tls: &mut TLSScratch,
        down: usize,
        right: usize,
        m_remnant: usize,
        n_remnant: usize,
    ) -> TractResult<()> {
        for LocDependant { spec, ker_spec: uspec, .. } in self.loc_dependant.iter() {
            let spec = specs.get_unchecked(*spec);
            let ker_spec = tls.ker_specs::<TI>().get_unchecked(*uspec);
            if let (FusedSpec::Store(c_store), FusedKerSpec::Store(tmp)) = (spec, ker_spec) {
                c_store.set_from_tile(down, right, m_remnant, n_remnant, tmp)
            }
        }
        Ok(())
    }
}