#![allow(clippy::similar_names)]
use crate::kernels::Kernel;
use crate::ptx::builder::{PtxArithmetic, PtxComparison, PtxControl};
use crate::ptx::{PtxKernel, PtxReg, PtxType};
/// Descriptor for a softmax backward (gradient) kernel.
///
/// The generated PTX computes, per row,
/// `grad_x_i = y_i * (grad_y_i - Σ_j y_j * grad_y_j)`,
/// using one warp per row with a shuffle-based reduction for the dot
/// product — which is why `row_size` is limited to the warp width (32).
#[derive(Debug, Clone)]
pub struct SoftmaxBackwardKernel {
// Number of independent rows; each row is processed by one warp.
pub num_rows: u32,
// Elements per row; must be ≤ 32 (enforced by `new`).
pub row_size: u32,
}
impl SoftmaxBackwardKernel {
#[must_use]
pub fn new(num_rows: u32, row_size: u32) -> Self {
assert!(row_size <= 32, "row_size must be ≤ 32 for warp reduction");
Self { num_rows, row_size }
}
}
impl Kernel for SoftmaxBackwardKernel {
    /// Entry-point name as it appears in the generated PTX.
    fn name(&self) -> &str {
        "softmax_backward"
    }

    /// Emits the PTX for the softmax backward pass.
    ///
    /// Layout: one warp per row. Each lane loads its element's softmax output
    /// `y_i` and upstream gradient `dL/dy_i` (0.0 for lanes past the end of
    /// the row), the warp tree-reduces the elementwise products with
    /// `shfl.down`, lane 0's total is broadcast with `shfl.idx`, and every
    /// in-row lane stores `y_i * (dL/dy_i - dot)`.
    fn build_ptx(&self) -> PtxKernel {
        // Captured at build time so shuffle steps wider than the row are
        // omitted from the emitted PTX entirely.
        let specialized_width = self.row_size;
        PtxKernel::new("softmax_backward")
            .param(PtxType::U64, "output_ptr")
            .param(PtxType::U64, "grad_output_ptr")
            .param(PtxType::U64, "grad_input_ptr")
            .param(PtxType::U32, "num_rows")
            .param(PtxType::U32, "row_size")
            .build(move |ctx| {
                // Global thread id = ctaid.x * ntid.x + tid.x.
                let thread_x = ctx.special_reg(PtxReg::TidX);
                let block_x = ctx.special_reg(PtxReg::CtaIdX);
                let block_dim_x = ctx.special_reg(PtxReg::NtidX);
                let gtid = ctx.mad_lo_u32(block_x, block_dim_x, thread_x);
                // One warp (32 lanes) owns one row.
                let lane_id = ctx.and_u32_imm(gtid, 31);
                let row = ctx.shr_u32_imm(gtid, 5);
                let rows_total = ctx.load_param_u32("num_rows");
                let row_len = ctx.load_param_u32("row_size");
                let y_base = ctx.load_param_u64("output_ptr");
                let dy_base = ctx.load_param_u64("grad_output_ptr");
                let dx_base = ctx.load_param_u64("grad_input_ptr");
                // `row` is identical across a warp, so this branch is
                // warp-uniform: whole warps past the last row exit together.
                let in_range = ctx.setp_lt_u32(row, rows_total);
                ctx.branch_if_not(in_range, "exit");
                let lane_in_row = ctx.setp_lt_u32(lane_id, row_len);
                // Byte address of this lane's element in each buffer (f32 = 4B).
                let row_start_elem = ctx.mul_lo_u32(row, row_len);
                let row_start_bytes = ctx.mul_wide_u32(row_start_elem, 4);
                let y_row = ctx.add_u64(y_base, row_start_bytes);
                let dy_row = ctx.add_u64(dy_base, row_start_bytes);
                let dx_row = ctx.add_u64(dx_base, row_start_bytes);
                let lane_bytes = ctx.mul_wide_u32(lane_id, 4);
                let y_addr = ctx.add_u64(y_row, lane_bytes);
                let dy_addr = ctx.add_u64(dy_row, lane_bytes);
                let dx_addr = ctx.add_u64(dx_row, lane_bytes);
                // Lanes past the row end read 0.0 so they contribute nothing
                // to the dot product below.
                let y = ctx.ld_global_f32_predicated(y_addr, lane_in_row, 0.0);
                let dy = ctx.ld_global_f32_predicated(dy_addr, lane_in_row, 0.0);
                let prod = ctx.mul_f32(y, dy);
                let full_mask = 0xFFFF_FFFFu32;
                // Tree-reduce Σ_j y_j * dy_j across the warp. Steps at least
                // as wide as the specialized row size are skipped at build
                // time; padding lanes hold 0.0, so the sum stays correct.
                let mut partial = prod;
                for step in [16u32, 8, 4, 2, 1]
                    .iter()
                    .copied()
                    .filter(|&s| s < specialized_width)
                {
                    let neighbor = ctx.shfl_down_f32(partial, step, full_mask);
                    partial = ctx.add_f32(partial, neighbor);
                }
                // Broadcast lane 0's total to every lane in the warp.
                let dot = ctx.shfl_idx_f32(partial, 0, full_mask);
                let centered = ctx.sub_f32(dy, dot);
                let dx = ctx.mul_f32(y, centered);
                // Only lanes inside the row write a gradient back.
                ctx.branch_if_not(lane_in_row, "exit");
                ctx.st_global_f32(dx_addr, dx);
                ctx.label("exit");
                ctx.ret();
            })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_softmax_backward_name() {
        assert_eq!(SoftmaxBackwardKernel::new(64, 32).name(), "softmax_backward");
    }

    #[test]
    fn test_softmax_backward_ptx_generation() {
        let ptx = SoftmaxBackwardKernel::new(64, 32).emit_ptx();
        // Entry point, all parameter declarations, and both shuffle forms
        // (down for the reduction, idx for the broadcast) must be present.
        for needle in [
            ".entry softmax_backward",
            ".param .u64 output_ptr",
            ".param .u64 grad_output_ptr",
            ".param .u64 grad_input_ptr",
            ".param .u32 num_rows",
            "shfl.sync.down",
            "shfl.sync.idx",
        ] {
            assert!(ptx.contains(needle));
        }
    }

    #[test]
    fn test_softmax_backward_small_row() {
        // A sub-warp row still emits a (shorter) shuffle reduction.
        let ptx = SoftmaxBackwardKernel::new(128, 16).emit_ptx();
        assert!(ptx.contains(".entry softmax_backward"));
        assert!(ptx.contains("shfl.sync"));
    }

    #[test]
    fn test_softmax_backward_barrier_safety() {
        let analysis = SoftmaxBackwardKernel::new(64, 32).analyze_barrier_safety();
        assert!(
            analysis.is_safe,
            "Softmax backward should be barrier-safe: {:?}",
            analysis.violations
        );
    }

    #[test]
    #[should_panic(expected = "row_size must be ≤ 32")]
    fn test_softmax_backward_row_size_limit() {
        // 64 exceeds the warp width, so construction must panic.
        let _ = SoftmaxBackwardKernel::new(64, 64);
    }
}