use crate::tensor::{DeviceTensor, DeviceTensorExt};
use derive_new::new;
use tract_core::internal::*;
/// Signature of a backend-specific RoPE kernel launcher.
///
/// Argument order is `(input, cos, sin, output)`; the last argument is the
/// pre-allocated output tensor the kernel writes into (see `eval_with_session`).
pub type DispatchApplyRopeFn =
fn(&DeviceTensor, &DeviceTensor, &DeviceTensor, &DeviceTensor) -> TractResult<()>;
/// Backend-agnostic wrapper op for ApplyRope (rotary position embedding):
/// bundles a backend label with the function pointer that launches the kernel.
#[derive(Clone, new)]
pub struct GpuApplyRope {
// Backend label; used for Debug/name and as the sole key for eq/hash.
pub backend_name: &'static str,
// Kernel launcher invoked in eval as `(input, cos, sin, &output)`.
pub dispatch: DispatchApplyRopeFn,
}
impl std::fmt::Debug for GpuApplyRope {
    /// Renders as `<backend_name>ApplyRope`, matching `Op::name`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(self.backend_name)?;
        f.write_str("ApplyRope")
    }
}
impl PartialEq for GpuApplyRope {
    /// Two ops compare equal iff they name the same backend; the `dispatch`
    /// function pointer does not participate in the comparison.
    fn eq(&self, other: &Self) -> bool {
        str::eq(self.backend_name, other.backend_name)
    }
}
// `eq` is a plain `&str` comparison (reflexive/symmetric/transitive), so the
// full-equivalence `Eq` marker is sound.
impl Eq for GpuApplyRope {}
impl std::hash::Hash for GpuApplyRope {
    /// Hashes only `backend_name`, mirroring `PartialEq` so that
    /// `a == b` implies `hash(a) == hash(b)`.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        std::hash::Hash::hash(self.backend_name, state);
    }
}
impl Op for GpuApplyRope {
    /// Human-readable op name: `<backend_name>ApplyRope` (same text as `Debug`).
    fn name(&self) -> StaticName {
        let mut label = String::from(self.backend_name);
        label.push_str("ApplyRope");
        label.into()
    }

    op_as_typed_op!();
}
impl EvalOp for GpuApplyRope {
    fn is_stateless(&self) -> bool {
        true
    }

    /// Evaluates RoPE on the device: resolves the three inputs to device
    /// tensors, allocates a session-owned output with the data input's type
    /// and shape, and hands the actual kernel launch to `self.dispatch`.
    fn eval_with_session(
        &self,
        node_id: usize,
        session: &TurnState,
        inputs: TVec<TValue>,
    ) -> TractResult<TVec<TValue>> {
        // Inputs arrive in the order (data, cos table, sin table).
        let (data_tv, cos_tv, sin_tv) = args_3!(inputs);
        let data = data_tv.to_device_tensor()?;
        let cos = cos_tv.to_device_tensor()?;
        let sin = sin_tv.to_device_tensor()?;
        // Output mirrors the data input exactly; RoPE is shape-preserving
        // (see `output_facts`).
        let out = crate::session_handler::make_tensor_for_node(
            session,
            node_id,
            data.datum_type(),
            data.shape(),
        )?;
        (self.dispatch)(data, cos, sin, &out)?;
        Ok(tvec!(out.into_tensor().into_tvalue()))
    }
}
impl TypedOp for GpuApplyRope {
    /// The single output fact copies the first input's datum type and shape,
    /// lifted to a device fact by the shared helper.
    fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
        crate::utils::facts_to_device_facts(inputs, |facts| {
            let data = &facts[0];
            Ok(tvec!(data.datum_type.fact(data.shape.clone())))
        })
        .with_context(|| format!("Error while computing facts for {:?}", self.name()))
    }

    as_op!();
}