use std::mem::{align_of, size_of};
use crate::format::ModelType;
/// In-memory representation of a Trueno native model: a dimension header
/// plus optional parameter/bias buffers and an optional extra payload.
#[derive(Debug, Clone)]
pub struct TruenoNativeModel {
    /// Algorithm family (declared in `crate::format`).
    pub model_type: ModelType,
    /// Declared number of parameters; `validate` checks it against `params`.
    pub n_params: u32,
    /// Number of input features expected by `predict_linear`.
    pub n_features: u32,
    /// Number of model outputs.
    pub n_outputs: u32,
    /// Parameter buffer, if attached (see `with_params`).
    pub params: Option<AlignedVec<f32>>,
    /// Bias buffer, if attached; `predict_linear` reads only element 0.
    pub bias: Option<AlignedVec<f32>>,
    /// Optional extra payload (defined in the `model_extra` module).
    pub extra: Option<ModelExtra>,
}
impl TruenoNativeModel {
    /// Creates a model header with the given dimensions; no parameter, bias,
    /// or extra buffers are attached yet.
    #[must_use]
    pub const fn new(
        model_type: ModelType,
        n_params: u32,
        n_features: u32,
        n_outputs: u32,
    ) -> Self {
        Self {
            model_type,
            n_params,
            n_features,
            n_outputs,
            params: None,
            bias: None,
            extra: None,
        }
    }

    /// Builder-style: attaches the parameter buffer.
    #[must_use]
    pub fn with_params(mut self, params: AlignedVec<f32>) -> Self {
        self.params = Some(params);
        self
    }

    /// Builder-style: attaches the bias buffer.
    #[must_use]
    pub fn with_bias(mut self, bias: AlignedVec<f32>) -> Self {
        self.bias = Some(bias);
        self
    }

    /// Builder-style: attaches the extra model payload.
    #[must_use]
    pub fn with_extra(mut self, extra: ModelExtra) -> Self {
        self.extra = Some(extra);
        self
    }

    /// True when every attached buffer reports itself aligned; absent
    /// buffers are vacuously aligned.
    #[must_use]
    pub fn is_aligned(&self) -> bool {
        let params_aligned = self.params.as_ref().map_or(true, AlignedVec::is_aligned);
        let bias_aligned = self.bias.as_ref().map_or(true, AlignedVec::is_aligned);
        params_aligned && bias_aligned
    }

    /// Total payload size in bytes across params, bias, and extra (absent
    /// parts count as zero); excludes the size of this struct itself.
    #[must_use]
    pub fn size_bytes(&self) -> usize {
        let params_size = self.params.as_ref().map_or(0, AlignedVec::size_bytes);
        let bias_size = self.bias.as_ref().map_or(0, AlignedVec::size_bytes);
        let extra_size = self.extra.as_ref().map_or(0, ModelExtra::size_bytes);
        params_size + bias_size + extra_size
    }

    /// Checks internal consistency of the attached buffers.
    ///
    /// # Errors
    /// - `ParamCountMismatch` if the params length differs from `n_params`.
    /// - `InvalidParameter` / `InvalidBias` on the first non-finite value
    ///   (NaN or infinity) in the respective buffer.
    pub fn validate(&self) -> Result<(), NativeModelError> {
        if let Some(ref params) = self.params {
            if params.len() != self.n_params as usize {
                return Err(NativeModelError::ParamCountMismatch {
                    declared: self.n_params as usize,
                    actual: params.len(),
                });
            }
            // Single borrow of `params` covers both the count check above and
            // the finiteness scan (previously two separate `if let` passes).
            for (i, &val) in params.as_slice().iter().enumerate() {
                if !val.is_finite() {
                    return Err(NativeModelError::InvalidParameter {
                        index: i,
                        value: val,
                    });
                }
            }
        }
        if let Some(ref bias) = self.bias {
            for (i, &val) in bias.as_slice().iter().enumerate() {
                if !val.is_finite() {
                    return Err(NativeModelError::InvalidBias {
                        index: i,
                        value: val,
                    });
                }
            }
        }
        Ok(())
    }

    /// Raw pointer to the start of the parameter buffer, if attached.
    #[must_use]
    pub fn params_ptr(&self) -> Option<*const f32> {
        self.params.as_ref().map(AlignedVec::as_ptr)
    }

    /// Raw pointer to the start of the bias buffer, if attached.
    #[must_use]
    pub fn bias_ptr(&self) -> Option<*const f32> {
        self.bias.as_ref().map(AlignedVec::as_ptr)
    }

    /// Computes `dot(params, features) + bias[0]`; the bias term defaults to
    /// `0.0` when the bias buffer is absent or empty.
    ///
    /// # Errors
    /// - `FeatureMismatch` if `features.len() != n_features`.
    /// - `MissingParams` if no parameter buffer is attached.
    /// - `ParamCountMismatch` if the parameter buffer length differs from the
    ///   feature count.
    pub fn predict_linear(&self, features: &[f32]) -> Result<f32, NativeModelError> {
        if features.len() != self.n_features as usize {
            return Err(NativeModelError::FeatureMismatch {
                expected: self.n_features as usize,
                got: features.len(),
            });
        }
        let params = self
            .params
            .as_ref()
            .ok_or(NativeModelError::MissingParams)?;
        // `zip` stops at the shorter side, so a short (or long) parameter
        // buffer previously produced a silently *partial* dot product.
        // Callers are not guaranteed to have run `validate()` first, so
        // reject the mismatch here as well.
        if params.len() != features.len() {
            return Err(NativeModelError::ParamCountMismatch {
                declared: features.len(),
                actual: params.len(),
            });
        }
        let dot: f32 = params
            .as_slice()
            .iter()
            .zip(features.iter())
            .map(|(p, x)| p * x)
            .sum();
        let bias = self
            .bias
            .as_ref()
            .and_then(|b| b.as_slice().first().copied())
            .unwrap_or(0.0);
        Ok(dot + bias)
    }
}
impl Default for TruenoNativeModel {
    /// A linear-regression header with zero parameters/features, one output,
    /// and no buffers attached.
    fn default() -> Self {
        Self {
            model_type: ModelType::LinearRegression,
            n_params: 0,
            n_features: 0,
            n_outputs: 1,
            params: None,
            bias: None,
            extra: None,
        }
    }
}
/// A `Vec`-backed buffer whose backing allocation is rounded up to a whole
/// number of 64-byte blocks, with a logical length tracked separately from
/// the padded storage.
///
/// NOTE(review): only the allocation *size* is padded; the start address is
/// guaranteed aligned only to `align_of::<T>()` (what `Vec` provides) — see
/// `is_aligned`. Confirm whether consumers require 64-byte start alignment.
#[derive(Debug, Clone)]
pub struct AlignedVec<T: Copy + Default> {
    /// Backing storage; `data.len()` is the padded capacity, not the logical length.
    data: Vec<T>,
    /// Number of live elements (a prefix of `data`).
    len: usize,
    /// Cached copy of `data.len()` in elements.
    capacity: usize,
}
impl<T: Copy + Default> AlignedVec<T> {
    /// Rounds an element count up so the backing buffer spans a whole number
    /// of 64-byte blocks; never returns less than `capacity` elements.
    fn padded_capacity(capacity: usize) -> usize {
        let elem_size = size_of::<T>();
        if elem_size == 0 {
            // Zero-sized types have no bytes to pad.
            return capacity;
        }
        ((capacity * elem_size).div_ceil(64) * 64 / elem_size).max(capacity)
    }

    /// Creates an empty vector whose backing storage holds at least
    /// `capacity` elements, padded to a 64-byte multiple and filled with
    /// `T::default()`.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        let aligned_cap = Self::padded_capacity(capacity);
        Self {
            data: vec![T::default(); aligned_cap],
            len: 0,
            capacity: aligned_cap,
        }
    }

    /// Copies `slice` into a freshly padded buffer.
    #[must_use]
    pub fn from_slice(slice: &[T]) -> Self {
        let mut vec = Self::with_capacity(slice.len());
        vec.data[..slice.len()].copy_from_slice(slice);
        vec.len = slice.len();
        vec
    }

    /// A vector of `len` elements, each `T::default()`.
    #[must_use]
    pub fn zeros(len: usize) -> Self {
        let mut vec = Self::with_capacity(len);
        vec.len = len;
        vec
    }

    /// Logical number of elements (not the padded allocation size).
    #[must_use]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// True when no elements are stored.
    #[must_use]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Padded allocation size in elements (always >= `len`).
    #[must_use]
    pub const fn capacity(&self) -> usize {
        self.capacity
    }

    /// Pointer to the first element of the backing buffer.
    #[must_use]
    pub fn as_ptr(&self) -> *const T {
        self.data.as_ptr()
    }

    /// Mutable pointer to the first element of the backing buffer.
    #[must_use]
    pub fn as_mut_ptr(&mut self) -> *mut T {
        self.data.as_mut_ptr()
    }

    /// The live elements as a slice; padding is excluded.
    #[must_use]
    pub fn as_slice(&self) -> &[T] {
        &self.data[..self.len]
    }

    /// The live elements as a mutable slice; padding is excluded.
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        &mut self.data[..self.len]
    }

    /// Whether the buffer start satisfies `T`'s natural alignment. Empty
    /// buffers and zero-sized types are trivially aligned.
    ///
    /// NOTE(review): this checks only `align_of::<T>()`, which `Vec` already
    /// guarantees — it does NOT verify 64-byte start alignment. Confirm what
    /// alignment consumers actually require.
    #[must_use]
    pub fn is_aligned(&self) -> bool {
        if self.data.is_empty() || size_of::<T>() == 0 {
            return true;
        }
        (self.data.as_ptr() as usize).is_multiple_of(align_of::<T>())
    }

    /// Size of the live elements in bytes; padding is excluded.
    #[must_use]
    pub fn size_bytes(&self) -> usize {
        self.len * size_of::<T>()
    }

    /// Appends `value`, growing the backing buffer geometrically when full.
    pub fn push(&mut self, value: T) {
        if self.len >= self.data.len() {
            // `Vec::resize` replaces the previous allocate-new-and-copy: it
            // can grow in place and fills the tail with `T::default()`,
            // producing exactly the same contents and capacity as before.
            let new_cap = (self.capacity * 2).max(16);
            self.data.resize(new_cap, T::default());
            self.capacity = new_cap;
        }
        self.data[self.len] = value;
        self.len += 1;
    }

    /// Resets the logical length to zero; the allocation is retained.
    pub fn clear(&mut self) {
        self.len = 0;
    }

    /// Checked read of element `index`; `None` past the logical length.
    #[must_use]
    pub fn get(&self, index: usize) -> Option<&T> {
        self.data[..self.len].get(index)
    }

    /// Checked mutable access to element `index`; `None` past the logical length.
    pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
        self.data[..self.len].get_mut(index)
    }

    /// Writes `value` at `index`; returns `false` (and does nothing) when
    /// `index` is past the logical length.
    pub fn set(&mut self, index: usize, value: T) -> bool {
        if index < self.len {
            self.data[index] = value;
            true
        } else {
            false
        }
    }
}
impl<T: Copy + Default> Default for AlignedVec<T> {
    /// An empty vector with no elements and a zero-sized backing buffer
    /// (identical to `Self::with_capacity(0)`).
    fn default() -> Self {
        Self {
            data: Vec::new(),
            len: 0,
            capacity: 0,
        }
    }
}
impl<T: Copy + Default> std::ops::Index<usize> for AlignedVec<T> {
    type Output = T;

    /// Indexes the logical contents; panics when `index >= len()`.
    ///
    /// Bounds are checked against the logical length — consistent with
    /// `get`/`as_slice`. Previously the check was against the padded backing
    /// buffer, so an index in the padding region (`len <= index < capacity`)
    /// silently returned a `T::default()` filler value instead of panicking.
    fn index(&self, index: usize) -> &Self::Output {
        &self.as_slice()[index]
    }
}
impl<T: Copy + Default> std::ops::IndexMut<usize> for AlignedVec<T> {
    /// Mutably indexes the logical contents; panics when `index >= len()`.
    ///
    /// Checked against the logical length for consistency with `Index` and
    /// `get_mut`. Previously padding slots past `len` were writable, and such
    /// writes were then invisible through `as_slice`/`get`, silently losing
    /// the stored value.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.as_mut_slice()[index]
    }
}
impl<T: Copy + Default> FromIterator<T> for AlignedVec<T> {
    /// Builds an `AlignedVec` by staging the items in a plain `Vec` first, so
    /// the padded buffer is sized exactly once from the final element count.
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        let staged = iter.into_iter().collect::<Vec<T>>();
        Self::from_slice(staged.as_slice())
    }
}
impl<T: Copy + Default + PartialEq> PartialEq for AlignedVec<T> {
    /// Two vectors are equal when their logical contents match element-wise;
    /// capacity and padding are ignored.
    fn eq(&self, other: &Self) -> bool {
        if self.len != other.len {
            return false;
        }
        self.as_slice()
            .iter()
            .zip(other.as_slice())
            .all(|(lhs, rhs)| lhs == rhs)
    }
}
mod model_extra;
pub use model_extra::*;