crate::ix!();
/**
  | ScaleBlobs takes one or more input tensors
  | and produces the same number of output
  | tensors, each holding the corresponding
  | input scaled element-wise by the `scale`
  | argument.
  |
  */
#[USE_OPERATOR_CONTEXT_FUNCTIONS]
pub struct ScaleBlobsOp<Context> {
    storage: OperatorStorage,
    context: Context,

    /// Scalar multiplier applied to every input blob (default 1.0).
    scale: f32,

    /// Scratch tensors, with host-side staging copies, presumably used
    /// by batched device (e.g. CUDA) implementations to hold per-blob
    /// sizes and input/output pointers.
    blob_sizes: Tensor,
    inputs: Tensor,
    outputs: Tensor,
    host_blob_sizes: Tensor,
    host_inputs: Tensor,
    host_outputs: Tensor,
}
register_cpu_operator!{ScaleBlobs, ScaleBlobsOp<CPUContext>}
num_inputs!{ScaleBlobs, (1,INT_MAX)}
num_outputs!{ScaleBlobs, (1,INT_MAX)}
args!{ScaleBlobs,
    0 => ("scale", "(float, default 1.0) the scale to apply.")
}
identical_type_and_shape!{ScaleBlobs}
allow_inplace!{ScaleBlobs,
    |_input: i32, _output: i32| {
        true
    }
}
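
// A minimal sketch of the semantics declared above, using plain `Vec<f32>`
// blobs in place of the framework `Tensor` type (hypothetical, for
// illustration only): every input blob is scaled element-wise by the same
// `scale`, and because in-place execution is allowed, each output may
// alias its input.
#[cfg(test)]
mod scale_blobs_semantics_sketch {

    /// Scale every blob in place by `scale`, mirroring a ScaleBlobs run
    /// with N inputs, N outputs, and in-place execution.
    fn scale_blobs_inplace(blobs: &mut [Vec<f32>], scale: f32) {
        for blob in blobs.iter_mut() {
            for x in blob.iter_mut() {
                *x *= scale;
            }
        }
    }

    #[test]
    fn scales_every_blob_elementwise() {
        let mut blobs = vec![vec![1.0, 2.0], vec![3.0]];
        scale_blobs_inplace(&mut blobs, 2.0);
        assert_eq!(blobs, vec![vec![2.0, 4.0], vec![6.0]]);
    }
}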
impl<Context> ScaleBlobsOp<Context> {

    pub fn new<Args>(args: Args) -> Self {
        todo!();
        /*
            : Operator<Context>(std::forward<Args>(args)...),
              OP_SINGLE_ARG(float, "scale", scale_, 1.0f)
        */
    }
    #[inline] pub fn do_run_with_type<T>(&mut self) -> bool {
        todo!();
        /*
            int batchSize = InputSize();
            for (int i = 0; i < batchSize; ++i) {
                const auto& X = Input(i);
                auto* Y = Output(i, X.sizes(), at::dtype<T>());
                math::Scale<float, T, Context>(
                    X.numel(),
                    scale_,
                    X.template data<T>(),
                    Y->template mutable_data<T>(),
                    &context_);
            }
            return true;
        */
    }
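
    // A hedged Rust rendering of the typed loop above, written against
    // plain slices rather than the framework tensors: `scale_slice` is a
    // hypothetical helper, not part of this operator's API, standing in
    // for one math::Scale call on a single input/output pair.
    #[inline] pub fn scale_slice(input: &[f32], scale: f32, output: &mut [f32]) {
        debug_assert_eq!(input.len(), output.len());
        for (y, &x) in output.iter_mut().zip(input.iter()) {
            *y = x * scale;
        }
    }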
    #[inline] pub fn run_on_device(&mut self) -> bool {
        todo!();
        /*
            for (int i = 0; i < InputSize(); ++i) {
                auto& input = this->template Input<Tensor>(i, CPU);
                auto* output = this->template Output<Tensor>(i, CPU);
                output->ResizeLike(input);
            }
            return DispatchHelper<TensorTypes<float>>::call(this, Input(0));
        */
    }
}
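
// A hedged sketch of the dispatch performed by `run_on_device` above:
// outputs are first shaped like their inputs, then a typed kernel is
// selected from the element type of the first input. The `DType` enum
// and the match below are hypothetical stand-ins for
// DispatchHelper/TensorTypes and, like TensorTypes<float>, cover f32 only.
#[cfg(test)]
mod scale_blobs_dispatch_sketch {

    enum DType { F32 }

    // Dispatch on the element type, then run the typed kernel.
    fn run(dtype: DType, data: &mut [f32], scale: f32) -> bool {
        match dtype {
            DType::F32 => {
                for x in data.iter_mut() {
                    *x *= scale;
                }
                true
            }
        }
    }

    #[test]
    fn dispatches_to_the_f32_kernel() {
        let mut data = [0.5_f32, 1.5];
        assert!(run(DType::F32, &mut data, 4.0));
        assert_eq!(data, [2.0, 6.0]);
    }
}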