use std::{
    borrow::Cow,
    sync::{atomic::AtomicUsize, Arc},
};

use candle_core::{quantized::GgmlDType, DType, Device, Result, Tensor};
use candle_nn::Linear;

use crate::{
    generate_isq, generate_isq_imatrix,
    hqq::{ISQ_HQQ_DEFAULT_OPT_STEPS, ISQ_HQQ_GROUP_SIZE},
    utils::{serialize_tensor, UQFF_VERSION},
    AfqBits, AfqGroupSize, AfqLayer, DummyLayer, FP8Linear, GgufMatMul, HqqAxis, HqqBits,
    HqqConfig, HqqLayer, IsqType, QuantMethod, QuantMethodConfig, QuantizeOntoGuard,
    QuantizedConfig, QuantizedSerde, QuantizedSerdeType, Shard, ShardedVarBuilder, UnquantLinear,
};

mod ops;
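
/// Linear layer whose weight was stored in FP8 (E4M3) with a single per-tensor scale.
///
/// The weight is dequantized once at construction, so the forward pass is an
/// ordinary matmul in `dequant_dtype`.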
#[derive(Debug)]
pub struct PerTensorFP8Linear {
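    /// The weight, already dequantized to `dequant_dtype`.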
weight: Tensor,
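    /// Reciprocal per-tensor scale from the checkpoint; unused once the weight
    /// has been dequantized.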
#[allow(dead_code)]
weight_scale_inv: Tensor,
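    /// Optional per-tensor activation scale from the checkpoint; currently unused.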
#[allow(dead_code)]
activation_scale: Option<Tensor>,
bias: Option<Tensor>,
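    /// Dtype the FP8 weight was dequantized into.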
#[allow(dead_code)]
dequant_dtype: DType,
}
impl QuantMethod for PerTensorFP8Linear {
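    /// Build the layer from [`QuantMethodConfig::PerTensorFP8`], eagerly
    /// dequantizing the FP8 weight with its per-tensor scale.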
fn new(method: QuantMethodConfig) -> candle_core::Result<Self>
where
Self: Sized,
{
match method {
QuantMethodConfig::PerTensorFP8 {
weight,
weight_scale_inv,
activation_scale,
bias,
dequant_dtype,
} => {
let dequant_weight =
ops::fp8_pertensor_dequantize(&weight, &weight_scale_inv, dequant_dtype)?;
Ok(Self {
weight: dequant_weight,
weight_scale_inv,
activation_scale,
bias,
dequant_dtype,
})
}
_ => unreachable!(),
}
}
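
    /// The weight is already held dequantized, so this is just a clone.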
fn dequantize_w(&self) -> Result<Tensor> {
Ok(self.weight.clone())
}
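
    /// Delegates to a freshly constructed `UnquantLinear` over the dequantized weight.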
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let unquant = UnquantLinear::new(QuantMethodConfig::Unquantized(Linear::new(
self.weight.clone(),
self.bias.clone(),
)))?;
unquant.forward(x)
}
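
    /// `None`: input activations are not cast to a quantized dtype before the matmul.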
fn quantized_act_type(&self) -> Option<DType> {
None
}
fn add_delta_w(&self, _delta: &Tensor) -> Result<Arc<dyn QuantMethod>> {
candle_core::bail!("PerTensorFP8Linear does not support add_delta_w")
}
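
    /// Reports the checkpoint storage dtype (`F8E4M3`) rather than the dtype of
    /// the dequantized in-memory weight.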
fn dtype_and_device(&self) -> (DType, Device) {
(DType::F8E4M3, self.weight.device().clone())
}
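
    /// Re-quantize the dequantized weight into the requested ISQ format (HQQ, AFQ,
    /// GGUF quants, FP8, F8Q8, or MXFP4); `None` produces an unquantized layer on
    /// the target device.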
fn apply_isq(
self: Arc<Self>,
dtype: Option<IsqType>,
device: Device,
n_quantized: &AtomicUsize,
imatrix_weight: Option<Vec<f32>>,
guard: QuantizeOntoGuard,
) -> Result<Arc<dyn QuantMethod>> {
let weight = self.dequantize_w()?;
match dtype {
Some(IsqType::HQQ4 | IsqType::HQQ8) => {
let _acquired_quantize_guard = guard.acquire(&device);
if imatrix_weight.is_some() {
candle_core::bail!("HQQ does not support imatrix.");
}
n_quantized.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let bits = match dtype.unwrap() {
IsqType::HQQ8 => HqqBits::Eight,
IsqType::HQQ4 => HqqBits::Four,
_ => unreachable!(),
};
let cfg = HqqConfig {
bits,
group_size: ISQ_HQQ_GROUP_SIZE.try_into()?,
axis: HqqAxis::Zero,
optimization_steps: ISQ_HQQ_DEFAULT_OPT_STEPS,
round_zeros: false,
channel_wise: true,
};
let res = HqqLayer::quantize(&weight.to_device(&device)?, &device, cfg)?;
if let Some(bias) = &self.bias {
let bias = bias
.to_device(&device)?
.to_dtype(res.dtype_and_device().0)?;
Ok(Arc::new(res.with_bias(bias)))
} else {
Ok(Arc::new(res))
}
}
Some(IsqType::AFQ2 | IsqType::AFQ3 | IsqType::AFQ4 | IsqType::AFQ6 | IsqType::AFQ8) => {
let _acquired_quantize_guard = guard.acquire(&device);
if imatrix_weight.is_some() {
candle_core::bail!("AFQ does not support imatrix.");
}
n_quantized.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let bits = match dtype.unwrap() {
IsqType::AFQ8 => AfqBits::Eight,
IsqType::AFQ6 => AfqBits::Six,
IsqType::AFQ4 => AfqBits::Four,
IsqType::AFQ3 => AfqBits::Three,
IsqType::AFQ2 => AfqBits::Two,
_ => unreachable!(),
};
Ok(Arc::new(AfqLayer::new(QuantMethodConfig::Afq {
weight: weight.to_device(&device)?,
                    bias: self.bias.as_ref().map(|b| b.to_device(&device)).transpose()?,
bits,
group_size: AfqGroupSize::default(),
})?))
}
Some(
IsqType::Q2K
| IsqType::Q3K
| IsqType::Q4K
| IsqType::Q4_0
| IsqType::Q4_1
| IsqType::Q5K
| IsqType::Q5_0
| IsqType::Q5_1
| IsqType::Q6K
| IsqType::Q8K
| IsqType::Q8_0
| IsqType::Q8_1,
) => {
let dtype: GgmlDType = dtype.unwrap().try_into()?;
let res = if let Some(imatrix_weight) = imatrix_weight {
generate_isq_imatrix!(weight, imatrix_weight, device, dtype, n_quantized, guard)
} else {
generate_isq!(weight, device, dtype, n_quantized, guard)
};
Ok(Arc::new(GgufMatMul::new(QuantMethodConfig::Gguf {
q_weight: res,
                    b: self
                        .bias
                        .as_ref()
                        .map(|b| b.to_dtype(DType::F32)?.to_device(&device))
                        .transpose()?,
})?))
}
Some(IsqType::F8E4M3) => {
let _acquired_quantize_guard = guard.acquire(&device);
if imatrix_weight.is_some() {
candle_core::bail!("F8E4M3 does not support imatrix.");
}
let w = weight.to_device(&device)?;
                let b = self
                    .bias
                    .as_ref()
                    .map(|b| b.to_device(&device))
                    .transpose()?;
Ok(Arc::new(FP8Linear::new(QuantMethodConfig::FP8 {
lin: Linear::new(w, b),
dtype: DType::F8E4M3,
})?))
}
Some(IsqType::F8Q8) => {
let _acquired_quantize_guard = guard.acquire(&device);
if imatrix_weight.is_some() {
candle_core::bail!("F8Q8 does not support imatrix.");
}
let w = weight.to_device(&device)?;
                let b = self
                    .bias
                    .as_ref()
                    .map(|b| b.to_device(&device))
                    .transpose()?;
Ok(Arc::new(crate::F8Q8Linear::from_weight(&w, b)?))
}
Some(IsqType::MXFP4) => {
let _acquired_quantize_guard = guard.acquire(&device);
if imatrix_weight.is_some() {
candle_core::bail!("MXFP4 does not support imatrix.");
}
n_quantized.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let w = weight.to_device(&device)?;
let b = self
.bias
.as_ref()
.map(|b| b.to_device(&device))
.transpose()?;
crate::MXFP4Layer::quantize(&w, b, &device)
}
None => {
let _acquired_quantize_guard = guard.acquire(&device);
let w = weight.to_device(&device)?;
                let b = self
                    .bias
                    .as_ref()
                    .map(|b| b.to_device(&device))
                    .transpose()?;
Ok(Arc::new(UnquantLinear::new(
QuantMethodConfig::Unquantized(Linear::new(w, b)),
)?))
}
}
}
}
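
// The weight is held dequantized, so serialization writes it as a plain
// unquantized tensor tagged `QuantizedSerdeType::Unquant`.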
impl QuantizedSerde for PerTensorFP8Linear {
fn isq_serde_supported(&self) -> bool {
true
}
fn name(&self) -> &'static str {
"pertensor-fp8-linear"
}
fn serialize(&self) -> Result<Cow<'_, [u8]>> {
self.serialize_with_bias(self.bias.clone())
}
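
    /// Byte layout: UQFF version (LE) | serde type tag (`Unquant`) | has-bias flag
    /// | weight tensor | optional bias tensor.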
fn serialize_with_bias(&self, bias: Option<Tensor>) -> Result<Cow<'_, [u8]>> {
let mut buffer = Vec::new();
buffer.extend(&UQFF_VERSION.to_le_bytes());
buffer.push(QuantizedSerdeType::Unquant as u8);
buffer.push(bias.is_some() as u8);
serialize_tensor(&mut buffer, &self.weight)?;
if let Some(bias) = &bias {
serialize_tensor(&mut buffer, bias)?;
}
Ok(Cow::from(buffer))
}
}
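
/// Load a per-tensor FP8 linear layer from the given var builder.
///
/// Falls back to a plain unquantized linear when the weight is present without a
/// `weight_scale_inv` scale, and to a [`DummyLayer`] when no weight is present at all.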
pub fn pertensor_fp8_linear_b(
in_dim: usize,
out_dim: usize,
_config: &QuantizedConfig,
bias: bool,
_hints: Shard,
vb: ShardedVarBuilder,
) -> Result<Arc<dyn QuantMethod>> {
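    // A weight without a `weight_scale_inv` means this tensor was never
    // FP8-quantized; fall back to a regular linear layer.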
if vb.contains_tensor("weight") && !vb.contains_tensor("weight_scale_inv") {
return crate::linear_b(in_dim, out_dim, bias, &None, vb);
}
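    // No weight tensor at all: stub the layer out.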
if !vb.contains_tensor("weight") {
let layer = <DummyLayer as QuantMethod>::new(QuantMethodConfig::Dummy)?;
return Ok(Arc::new(layer) as Arc<dyn QuantMethod>);
}
let weight = vb.get_with_hints_dtype(
(out_dim, in_dim),
"weight",
Default::default(),
DType::F8E4M3,
)?;
let weight_scale_inv =
vb.get_with_hints_dtype((), "weight_scale_inv", Default::default(), DType::F32)?;
let activation_scale = if vb.contains_tensor("activation_scale") {
Some(vb.get_with_hints_dtype((), "activation_scale", Default::default(), DType::F32)?)
} else {
None
};
let bias = if bias && vb.contains_tensor("bias") {
Some(vb.get((out_dim,), "bias")?)
} else {
None
};
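    // Infer the dequantization dtype from the bias when present; default to BF16.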
let dequant_dtype = bias.as_ref().map(|b| b.dtype()).unwrap_or(DType::BF16);
Ok(Arc::new(PerTensorFP8Linear::new(
QuantMethodConfig::PerTensorFP8 {
weight,
weight_scale_inv,
activation_scale,
bias,
dequant_dtype,
},
)?))
}