// NOTE(review): the first 67 lines of the original file were lost in extraction —
// only a bare line-number column remained here. They most likely held the license
// header and module-level docs; recover them from version control if needed.
use co::prelude::*;
use co::plugin::numeric_helpers::*;
/// Turns a data source into a native `SharedTensor` of `f32` values.
///
/// Implementors only provide `transform_to_vec`; the remaining methods are
/// default implementations that allocate a native tensor of the requested
/// shape and copy the flattened data into it.
pub trait Transformer {
    /// Allocates a native-backed `SharedTensor` with `shape` and fills it with
    /// the implementor's flattened data.
    ///
    /// Returns `TransformerError::InvalidShape` when the flattened data does
    /// not exactly fill a tensor of `shape`.
    ///
    /// NOTE(review): backend construction and tensor allocation are `unwrap`ed —
    /// presumably infallible for the native backend, but a failure here panics.
    fn transform(&self, shape: &[usize]) -> Result<SharedTensor<f32>, TransformerError> {
        let backend = Backend::<Native>::default().unwrap();
        let mut shared = SharedTensor::<f32>::new(backend.device(), &shape).unwrap();
        {
            // Scope the mutable borrow of the tensor's native memory so the
            // tensor itself can be returned afterwards.
            let mut host_mem = shared.get_mut(backend.device()).unwrap();
            try!(Self::write_to_memory(&mut host_mem, &self.transform_to_vec()));
        }
        Ok(shared)
    }

    /// Produces the implementor's data as a flat `Vec<f32>`.
    fn transform_to_vec(&self) -> Vec<f32>;

    /// Writes `data` to the start of `mem`; `data` must fill `mem` exactly.
    fn write_to_memory<T: NumCast + ::std::marker::Copy>(mem: &mut MemoryType, data: &[T]) -> Result<(), TransformerError> {
        Self::write_to_memory_offset(mem, data, 0)
    }

    /// Writes `data` into `mem` starting at element `offset`, casting each
    /// element to `f32`.
    ///
    /// With `offset == 0` the data length must match the buffer length exactly;
    /// with a non-zero offset only out-of-bounds writes are rejected. Either
    /// violation yields `TransformerError::InvalidShape`.
    fn write_to_memory_offset<T: NumCast + ::std::marker::Copy>(mem: &mut MemoryType, data: &[T], offset: usize) -> Result<(), TransformerError> {
        match mem {
            &mut MemoryType::Native(ref mut native_mem) => {
                let mut slice = native_mem.as_mut_slice::<f32>();
                // A zero offset means "fill the whole buffer" — enforce an
                // exact size match up front.
                if offset == 0 && slice.len() != data.len() {
                    return Err(TransformerError::InvalidShape);
                }
                for (i, value) in data.iter().enumerate() {
                    match slice.get_mut(i + offset) {
                        // `cast` can only fail for non-finite/unrepresentable
                        // values; mirrors the original unconditional unwrap.
                        Some(slot) => *slot = cast(*value).unwrap(),
                        None => return Err(TransformerError::InvalidShape),
                    }
                }
                Ok(())
            },
            // Device-memory targets are feature-gated and not yet supported.
            #[cfg(any(feature = "opencl", feature = "cuda"))]
            _ => { unimplemented!() }
        }
    }
}
/// Errors that can occur while transforming raw data into a `SharedTensor`.
#[derive(Debug, Copy, Clone)]
pub enum TransformerError {
    /// The provided data does not fit the requested tensor shape.
    InvalidShape,
    /// The pixel buffer is not valid RGB data.
    InvalidRgbPixels,
    /// The pixel buffer is not valid RGBA data.
    InvalidRgbaPixels,
    /// The pixel buffer is not valid grayscale (luma) data.
    InvalidLumaPixels,
    /// The pixel buffer is not valid grayscale-with-alpha (luma-alpha) data.
    InvalidLumaAlphaPixels,
}

// A public error type should be printable and usable as `dyn Error`
// (boxing, `?` conversion at application boundaries), so implement the
// standard error traits. Fully-qualified paths avoid new `use` items.
impl ::std::fmt::Display for TransformerError {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        let msg = match *self {
            TransformerError::InvalidShape => "provided data does not match the tensor shape",
            TransformerError::InvalidRgbPixels => "pixel data is not valid RGB",
            TransformerError::InvalidRgbaPixels => "pixel data is not valid RGBA",
            TransformerError::InvalidLumaPixels => "pixel data is not valid grayscale (luma)",
            TransformerError::InvalidLumaAlphaPixels => "pixel data is not valid grayscale with alpha (luma-alpha)",
        };
        write!(f, "{}", msg)
    }
}

impl ::std::error::Error for TransformerError {}