use crate::error::{Result, RuvLLMError};
use crate::kernels::rope::{precompute_rope_tables_with_config, RopeConfig, RopeTables};
use crate::kernels::{apply_rope_neon, flash_attention_neon, rms_norm_neon, AttentionConfig};
#[cfg(target_arch = "aarch64")]
use std::arch::aarch64::*;
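/// Configuration for the Phi-3 model family, with presets for the mini
/// (4k and 128k context), small, and medium variants.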
#[derive(Debug, Clone)]
pub struct Phi3Config {
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_kv_heads: usize,
pub vocab_size: usize,
pub max_position_embeddings: usize,
pub original_max_position_embeddings: usize,
pub rope_theta: f32,
pub rope_scaling_factor: f32,
pub rms_norm_eps: f32,
pub sliding_window: Option<usize>,
pub head_dim: usize,
pub use_flash_attention: bool,
pub bos_token_id: u32,
pub eos_token_id: u32,
}
impl Default for Phi3Config {
fn default() -> Self {
Self::phi3_mini_4k()
}
}
impl Phi3Config {
pub fn phi3_mini_4k() -> Self {
Self {
hidden_size: 3072,
intermediate_size: 8192,
num_hidden_layers: 32,
num_attention_heads: 32,
            num_kv_heads: 32,
            vocab_size: 32064,
max_position_embeddings: 4096,
original_max_position_embeddings: 4096,
rope_theta: 10000.0,
rope_scaling_factor: 1.0,
rms_norm_eps: 1e-5,
sliding_window: Some(2048),
            head_dim: 96,
            use_flash_attention: true,
bos_token_id: 1,
eos_token_id: 32000,
}
}
pub fn phi3_mini_128k() -> Self {
Self {
hidden_size: 3072,
intermediate_size: 8192,
num_hidden_layers: 32,
num_attention_heads: 32,
num_kv_heads: 32,
vocab_size: 32064,
max_position_embeddings: 131072,
original_max_position_embeddings: 4096,
rope_theta: 10000.0,
            rope_scaling_factor: 32.0,
            rms_norm_eps: 1e-5,
sliding_window: Some(2048),
head_dim: 96,
use_flash_attention: true,
bos_token_id: 1,
eos_token_id: 32000,
}
}
pub fn phi3_small() -> Self {
Self {
hidden_size: 2560,
intermediate_size: 6912,
num_hidden_layers: 32,
num_attention_heads: 32,
num_kv_heads: 32,
vocab_size: 32064,
max_position_embeddings: 8192,
original_max_position_embeddings: 8192,
rope_theta: 10000.0,
rope_scaling_factor: 1.0,
rms_norm_eps: 1e-5,
sliding_window: Some(2048),
            head_dim: 80,
            use_flash_attention: true,
bos_token_id: 1,
eos_token_id: 32000,
}
}
pub fn phi3_medium() -> Self {
Self {
hidden_size: 5120,
intermediate_size: 13824,
num_hidden_layers: 40,
num_attention_heads: 40,
num_kv_heads: 40,
vocab_size: 32064,
max_position_embeddings: 4096,
original_max_position_embeddings: 4096,
rope_theta: 10000.0,
rope_scaling_factor: 1.0,
rms_norm_eps: 1e-5,
sliding_window: Some(2048),
            head_dim: 128,
            use_flash_attention: true,
bos_token_id: 1,
eos_token_id: 32000,
}
}
pub fn attention_config(&self) -> AttentionConfig {
AttentionConfig {
num_heads: self.num_attention_heads,
num_kv_heads: self.num_kv_heads,
head_dim: self.head_dim,
max_seq_len: self.max_position_embeddings,
causal: true,
            // 0.0 leaves the scale unset here; `Phi3Attention::forward`
            // computes the actual 1 / sqrt(head_dim) scale explicitly.
            scale: 0.0,
        }
}
pub fn rope_config(&self) -> RopeConfig {
RopeConfig {
base: self.rope_theta,
head_dim: self.head_dim,
max_seq_len: self.max_position_embeddings,
scaling_factor: self.rope_scaling_factor,
ntk_aware: self.rope_scaling_factor > 1.0,
original_max_len: self.original_max_position_embeddings,
}
}
}
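/// Multi-head self-attention with rotary position embeddings and optional
/// sliding-window masking. Projection weights are stored row-major as
/// `[out_dim, in_dim]`.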
#[derive(Debug)]
pub struct Phi3Attention {
pub q_proj: Vec<f32>,
pub k_proj: Vec<f32>,
pub v_proj: Vec<f32>,
pub o_proj: Vec<f32>,
pub config: Phi3Config,
pub rope_tables: RopeTables,
}
impl Phi3Attention {
pub fn new(config: &Phi3Config) -> Self {
        let hidden_size = config.hidden_size;
        // Four square [hidden_size x hidden_size] projections; this assumes
        // num_kv_heads == num_attention_heads, as in every Phi-3 preset above.
        let qkv_size = hidden_size * hidden_size;
Self {
q_proj: vec![0.0; qkv_size],
k_proj: vec![0.0; qkv_size],
v_proj: vec![0.0; qkv_size],
o_proj: vec![0.0; qkv_size],
config: config.clone(),
rope_tables: precompute_rope_tables_with_config(&config.rope_config()),
}
}
pub fn load_weights(
&mut self,
q_proj: &[f32],
k_proj: &[f32],
v_proj: &[f32],
o_proj: &[f32],
) -> Result<()> {
let expected_size = self.config.hidden_size * self.config.hidden_size;
if q_proj.len() != expected_size
|| k_proj.len() != expected_size
|| v_proj.len() != expected_size
|| o_proj.len() != expected_size
{
return Err(RuvLLMError::Model(format!(
"Invalid weight dimensions: expected {}, got q={}, k={}, v={}, o={}",
expected_size,
q_proj.len(),
k_proj.len(),
v_proj.len(),
o_proj.len()
)));
}
self.q_proj.copy_from_slice(q_proj);
self.k_proj.copy_from_slice(k_proj);
self.v_proj.copy_from_slice(v_proj);
self.o_proj.copy_from_slice(o_proj);
Ok(())
}
    /// Self-attention over `hidden_states` of shape `[seq_len, hidden_size]`
    /// at the given absolute `positions`; optionally appends the projected
    /// keys/values to a KV cache.
    pub fn forward(
&self,
hidden_states: &[f32],
positions: &[usize],
kv_cache: Option<(&mut Vec<f32>, &mut Vec<f32>)>,
) -> Result<Vec<f32>> {
let seq_len = positions.len();
let hidden_size = self.config.hidden_size;
let num_heads = self.config.num_attention_heads;
let head_dim = self.config.head_dim;
if hidden_states.len() != seq_len * hidden_size {
return Err(RuvLLMError::InvalidOperation(format!(
"Invalid hidden_states shape: expected {}, got {}",
seq_len * hidden_size,
hidden_states.len()
)));
}
let mut query =
self.linear_transform(hidden_states, &self.q_proj, hidden_size, hidden_size);
let mut key = self.linear_transform(hidden_states, &self.k_proj, hidden_size, hidden_size);
let value = self.linear_transform(hidden_states, &self.v_proj, hidden_size, hidden_size);
self.apply_surope(&mut query, positions);
self.apply_surope(&mut key, positions);
let (key_states, value_states) = if let Some((k_cache, v_cache)) = kv_cache {
k_cache.extend_from_slice(&key);
v_cache.extend_from_slice(&value);
(k_cache.as_slice(), v_cache.as_slice())
} else {
(key.as_slice(), value.as_slice())
};
let kv_len = key_states.len() / hidden_size;
let scale = 1.0 / (head_dim as f32).sqrt();
let mut output = vec![0.0; seq_len * hidden_size];
for h in 0..num_heads {
for t in 0..seq_len {
let q_offset = (t * num_heads + h) * head_dim;
let q_slice = &query[q_offset..q_offset + head_dim];
                // Gather this head's K/V rows into contiguous buffers: the
                // layout is [seq, num_heads, head_dim], so per-head data is
                // strided and is repacked for the attention kernel.
                let mut k_slice = Vec::with_capacity(kv_len * head_dim);
                let mut v_slice = Vec::with_capacity(kv_len * head_dim);
for kv_t in 0..kv_len {
let kv_offset = (kv_t * num_heads + h) * head_dim;
k_slice.extend_from_slice(&key_states[kv_offset..kv_offset + head_dim]);
v_slice.extend_from_slice(&value_states[kv_offset..kv_offset + head_dim]);
}
                // Sliding-window attention: a query at absolute position `pos`
                // attends only to keys in [pos - window, pos].
                let (k_slice, v_slice, _effective_kv_len) =
                    if let Some(window) = self.config.sliding_window {
                        let pos = positions[t];
                        let start = pos.saturating_sub(window);
                        let end = kv_len;
if start > 0 {
let start_offset = start * head_dim;
(
k_slice[start_offset..].to_vec(),
v_slice[start_offset..].to_vec(),
end - start,
)
} else {
(k_slice, v_slice, kv_len)
}
} else {
(k_slice, v_slice, kv_len)
};
let head_output = flash_attention_neon(q_slice, &k_slice, &v_slice, scale, true);
let out_offset = (t * num_heads + h) * head_dim;
output[out_offset..out_offset + head_dim].copy_from_slice(&head_output);
}
}
let output = self.linear_transform(&output, &self.o_proj, hidden_size, hidden_size);
Ok(output)
}
    /// Applies rotary position embeddings with Phi-3's long-context position
    /// scaling. This is a linear position-interpolation approximation of
    /// SuRoPE/LongRoPE; the per-frequency `rope_tables` precomputed in `new`
    /// are not consulted here.
    fn apply_surope(&self, x: &mut [f32], positions: &[usize]) {
let head_dim = self.config.head_dim;
let num_heads = self.config.num_attention_heads;
let seq_len = positions.len();
for h in 0..num_heads {
for t in 0..seq_len {
let offset = (t * num_heads + h) * head_dim;
let mut head_vec = x[offset..offset + head_dim].to_vec();
                // Linear position interpolation: truncating to usize discards
                // the fractional part of the scaled position.
                let scaled_pos = (positions[t] as f32 / self.config.rope_scaling_factor) as usize;
apply_rope_neon(
&mut head_vec,
&[scaled_pos],
head_dim,
self.config.rope_theta,
);
x[offset..offset + head_dim].copy_from_slice(&head_vec);
}
}
}
    /// Naive row-major matmul: output[b][o] = sum_i(input[b][i] * weights[o][i]).
    fn linear_transform(
&self,
input: &[f32],
weights: &[f32],
in_dim: usize,
out_dim: usize,
) -> Vec<f32> {
let batch_size = input.len() / in_dim;
let mut output = vec![0.0; batch_size * out_dim];
for b in 0..batch_size {
for o in 0..out_dim {
let mut sum = 0.0;
for i in 0..in_dim {
sum += input[b * in_dim + i] * weights[o * in_dim + i];
}
output[b * out_dim + o] = sum;
}
}
output
}
}
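/// Gated feed-forward block computing `down(silu(gate(x)) * up(x))`.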
#[derive(Debug)]
pub struct Phi3MLP {
pub gate_proj: Vec<f32>,
pub up_proj: Vec<f32>,
pub down_proj: Vec<f32>,
pub hidden_size: usize,
pub intermediate_size: usize,
}
impl Phi3MLP {
pub fn new(config: &Phi3Config) -> Self {
Self {
gate_proj: vec![0.0; config.intermediate_size * config.hidden_size],
up_proj: vec![0.0; config.intermediate_size * config.hidden_size],
down_proj: vec![0.0; config.hidden_size * config.intermediate_size],
hidden_size: config.hidden_size,
intermediate_size: config.intermediate_size,
}
}
pub fn load_weights(
&mut self,
gate_proj: &[f32],
up_proj: &[f32],
down_proj: &[f32],
) -> Result<()> {
let gate_up_size = self.intermediate_size * self.hidden_size;
let down_size = self.hidden_size * self.intermediate_size;
if gate_proj.len() != gate_up_size
|| up_proj.len() != gate_up_size
|| down_proj.len() != down_size
{
return Err(RuvLLMError::Model(
"Invalid MLP weight dimensions".to_string(),
));
}
self.gate_proj.copy_from_slice(gate_proj);
self.up_proj.copy_from_slice(up_proj);
self.down_proj.copy_from_slice(down_proj);
Ok(())
}
    /// SwiGLU-style forward pass: `down(silu(gate(x)) * up(x))`.
    pub fn forward(&self, hidden_states: &[f32]) -> Result<Vec<f32>> {
let gate = self.linear(
hidden_states,
&self.gate_proj,
self.hidden_size,
self.intermediate_size,
);
let gate_activated = self.silu(&gate);
let up = self.linear(
hidden_states,
&self.up_proj,
self.hidden_size,
self.intermediate_size,
);
let hidden: Vec<f32> = gate_activated
.iter()
.zip(up.iter())
.map(|(g, u)| g * u)
.collect();
let output = self.linear(
&hidden,
&self.down_proj,
self.intermediate_size,
self.hidden_size,
);
Ok(output)
}
    /// Row-major matmul with a NEON fast path on aarch64 and a scalar
    /// fallback elsewhere.
    fn linear(&self, input: &[f32], weights: &[f32], in_dim: usize, out_dim: usize) -> Vec<f32> {
let batch_size = input.len() / in_dim;
let mut output = vec![0.0; batch_size * out_dim];
        // SAFETY: `output` is sized `batch_size * out_dim` above; the caller
        // must ensure `weights` holds at least `out_dim * in_dim` elements.
        #[cfg(target_arch = "aarch64")]
        unsafe {
            self.linear_neon(input, weights, &mut output, batch_size, in_dim, out_dim);
        }
#[cfg(not(target_arch = "aarch64"))]
{
for b in 0..batch_size {
for o in 0..out_dim {
let mut sum = 0.0;
for i in 0..in_dim {
sum += input[b * in_dim + i] * weights[o * in_dim + i];
}
output[b * out_dim + o] = sum;
}
}
}
output
}
    /// NEON dot-product kernel: 4-wide fused multiply-add over the main body
    /// of each row, with a scalar loop for the remainder.
    ///
    /// # Safety
    /// `input` must hold at least `batch_size * in_dim` elements, `weights`
    /// at least `out_dim * in_dim`, and `output` at least `batch_size * out_dim`.
    #[cfg(target_arch = "aarch64")]
    unsafe fn linear_neon(
&self,
input: &[f32],
weights: &[f32],
output: &mut [f32],
batch_size: usize,
in_dim: usize,
out_dim: usize,
) {
let in_ptr: *const f32 = input.as_ptr();
let w_ptr: *const f32 = weights.as_ptr();
let out_ptr: *mut f32 = output.as_mut_ptr();
for b in 0..batch_size {
for o in 0..out_dim {
let mut acc = vdupq_n_f32(0.0);
let mut i = 0;
while i + 4 <= in_dim {
let x = vld1q_f32(in_ptr.add(b * in_dim + i));
let w = vld1q_f32(w_ptr.add(o * in_dim + i));
acc = vfmaq_f32(acc, x, w);
i += 4;
}
let mut sum = vaddvq_f32(acc);
while i < in_dim {
sum += *in_ptr.add(b * in_dim + i) * *w_ptr.add(o * in_dim + i);
i += 1;
}
*out_ptr.add(b * out_dim + o) = sum;
}
}
}
fn silu(&self, x: &[f32]) -> Vec<f32> {
crate::kernels::silu_vec(x)
}
}
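/// One decoder block: pre-norm self-attention and pre-norm MLP, each wrapped
/// in a residual connection.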
#[derive(Debug)]
pub struct Phi3DecoderLayer {
pub self_attn: Phi3Attention,
pub mlp: Phi3MLP,
pub input_layernorm: Vec<f32>,
pub post_attention_layernorm: Vec<f32>,
pub rms_norm_eps: f32,
pub hidden_size: usize,
}
impl Phi3DecoderLayer {
pub fn new(config: &Phi3Config) -> Self {
Self {
self_attn: Phi3Attention::new(config),
mlp: Phi3MLP::new(config),
input_layernorm: vec![1.0; config.hidden_size],
post_attention_layernorm: vec![1.0; config.hidden_size],
rms_norm_eps: config.rms_norm_eps,
hidden_size: config.hidden_size,
}
}
pub fn forward(
&self,
hidden_states: &[f32],
positions: &[usize],
kv_cache: Option<(&mut Vec<f32>, &mut Vec<f32>)>,
) -> Result<Vec<f32>> {
let seq_len = positions.len();
        // Pre-norm: RMSNorm each token's hidden vector before attention.
        let mut normed = hidden_states.to_vec();
for t in 0..seq_len {
let offset = t * self.hidden_size;
let slice = &mut normed[offset..offset + self.hidden_size];
rms_norm_neon(slice, &self.input_layernorm, self.rms_norm_eps);
}
let attn_output = self.self_attn.forward(&normed, positions, kv_cache)?;
let mut hidden: Vec<f32> = hidden_states
.iter()
.zip(attn_output.iter())
.map(|(h, a)| h + a)
.collect();
let mut normed = hidden.clone();
for t in 0..seq_len {
let offset = t * self.hidden_size;
let slice = &mut normed[offset..offset + self.hidden_size];
rms_norm_neon(slice, &self.post_attention_layernorm, self.rms_norm_eps);
}
let mlp_output = self.mlp.forward(&normed)?;
for (h, m) in hidden.iter_mut().zip(mlp_output.iter()) {
*h += m;
}
Ok(hidden)
}
}
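/// Full Phi-3 model: token embeddings, a stack of decoder layers, a final
/// RMSNorm, and a language-model head (tied to the embeddings by default).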
#[derive(Debug)]
pub struct Phi3Model {
pub config: Phi3Config,
pub embed_tokens: Vec<f32>,
pub layers: Vec<Phi3DecoderLayer>,
pub norm: Vec<f32>,
pub lm_head: Option<Vec<f32>>,
pub tie_word_embeddings: bool,
}
impl Phi3Model {
pub fn new(config: &Phi3Config) -> Result<Self> {
let mut layers = Vec::with_capacity(config.num_hidden_layers);
for _ in 0..config.num_hidden_layers {
layers.push(Phi3DecoderLayer::new(config));
}
Ok(Self {
config: config.clone(),
embed_tokens: vec![0.0; config.vocab_size * config.hidden_size],
layers,
norm: vec![1.0; config.hidden_size],
lm_head: None,
tie_word_embeddings: true,
})
}
    /// Embeds `input_ids`, runs all decoder layers, applies the final RMSNorm,
    /// and returns logits of shape `[seq_len, vocab_size]`. When `kv_caches`
    /// is provided, each layer's K/V cache is appended to for incremental
    /// decoding.
    pub fn forward(
&self,
input_ids: &[u32],
positions: &[usize],
mut kv_caches: Option<&mut Vec<(Vec<f32>, Vec<f32>)>>,
) -> Result<Vec<f32>> {
let seq_len = positions.len();
if input_ids.len() != seq_len {
return Err(RuvLLMError::InvalidOperation(format!(
"input_ids length {} != positions length {}",
input_ids.len(),
seq_len
)));
}
let mut hidden_states = Vec::with_capacity(seq_len * self.config.hidden_size);
for &token_id in input_ids {
let offset = (token_id as usize) * self.config.hidden_size;
if offset + self.config.hidden_size > self.embed_tokens.len() {
return Err(RuvLLMError::InvalidOperation(format!(
"Token ID {} out of vocabulary bounds",
token_id
)));
}
hidden_states
.extend_from_slice(&self.embed_tokens[offset..offset + self.config.hidden_size]);
}
for (layer_idx, layer) in self.layers.iter().enumerate() {
            let kv_cache = kv_caches.as_mut().map(|caches| {
                // Grow the per-layer cache list lazily on first use.
                while caches.len() <= layer_idx {
caches.push((Vec::new(), Vec::new()));
}
let (k, v) = &mut caches[layer_idx];
(k, v)
});
hidden_states = layer.forward(&hidden_states, positions, kv_cache)?;
}
for t in 0..seq_len {
let offset = t * self.config.hidden_size;
let slice = &mut hidden_states[offset..offset + self.config.hidden_size];
rms_norm_neon(slice, &self.norm, self.config.rms_norm_eps);
}
let lm_weights = if self.tie_word_embeddings {
&self.embed_tokens
} else {
self.lm_head
.as_ref()
.ok_or_else(|| RuvLLMError::InvalidOperation("No LM head weights".to_string()))?
};
        // Naive output projection: O(seq_len * vocab_size * hidden_size).
        let mut logits = vec![0.0; seq_len * self.config.vocab_size];
for t in 0..seq_len {
for v in 0..self.config.vocab_size {
let mut sum = 0.0;
for h in 0..self.config.hidden_size {
sum += hidden_states[t * self.config.hidden_size + h]
* lm_weights[v * self.config.hidden_size + h];
}
logits[t * self.config.vocab_size + v] = sum;
}
}
Ok(logits)
}
    /// Renders messages in the Phi-3 chat format,
    /// `<|role|>\n{content}<|end|>\n`, terminated with the generation prompt
    /// `<|assistant|>`.
    pub fn apply_chat_template(messages: &[(String, String)]) -> String {
let mut result = String::new();
for (role, content) in messages {
result.push_str(&format!("<|{}|>\n{}<|end|>\n", role, content));
}
result.push_str("<|assistant|>");
result
}
#[cfg(feature = "candle")]
pub fn from_gguf(_path: &std::path::Path) -> Result<Self> {
Err(RuvLLMError::NotFound(
"GGUF loading not yet implemented for Phi-3".to_string(),
))
}
#[cfg(feature = "candle")]
pub fn from_safetensors(_path: &std::path::Path) -> Result<Self> {
Err(RuvLLMError::NotFound(
"Safetensors loading not yet implemented for Phi-3".to_string(),
))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_phi3_config() {
let config = Phi3Config::phi3_mini_4k();
assert_eq!(config.hidden_size, 3072);
assert_eq!(config.num_hidden_layers, 32);
assert_eq!(config.head_dim, 96);
assert_eq!(config.sliding_window, Some(2048));
}
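    // Sketch: every preset should satisfy hidden_size == num_heads * head_dim,
    // which the per-head offset arithmetic in `Phi3Attention::forward` assumes.
    #[test]
    fn test_phi3_preset_head_dim_invariant() {
        for config in [
            Phi3Config::phi3_mini_4k(),
            Phi3Config::phi3_mini_128k(),
            Phi3Config::phi3_small(),
            Phi3Config::phi3_medium(),
        ] {
            assert_eq!(
                config.hidden_size,
                config.num_attention_heads * config.head_dim
            );
        }
    }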
#[test]
fn test_phi3_config_128k() {
let config = Phi3Config::phi3_mini_128k();
assert_eq!(config.max_position_embeddings, 131072);
assert_eq!(config.rope_scaling_factor, 32.0);
}
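    // Sketch: the NTK-aware flag in `rope_config` should engage only when the
    // scaling factor interpolates beyond the original context window.
    #[test]
    fn test_phi3_rope_config_ntk_flag() {
        let rope_4k = Phi3Config::phi3_mini_4k().rope_config();
        assert!(!rope_4k.ntk_aware);
        assert_eq!(rope_4k.scaling_factor, 1.0);
        let rope_128k = Phi3Config::phi3_mini_128k().rope_config();
        assert!(rope_128k.ntk_aware);
        assert_eq!(rope_128k.original_max_len, 4096);
    }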
#[test]
fn test_phi3_attention_config() {
let config = Phi3Config::phi3_mini_4k();
let attn_config = config.attention_config();
assert_eq!(attn_config.num_heads, 32);
assert_eq!(attn_config.num_kv_heads, 32);
assert!(attn_config.causal);
}
#[test]
fn test_phi3_mlp_silu() {
let config = Phi3Config::phi3_mini_4k();
let mlp = Phi3MLP::new(&config);
let input = vec![0.0, 1.0, -1.0, 2.0];
let output = mlp.silu(&input);
assert!((output[0]).abs() < 1e-5);
assert!((output[1] - 0.731).abs() < 0.01);
        assert!((output[2] - (-0.269)).abs() < 0.01);
        assert!((output[3] - 1.762).abs() < 0.01);
}
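    // Sketch of the SwiGLU path on a hand-built 4 -> 8 -> 4 MLP (hypothetical
    // tiny dimensions, not a real Phi-3 size). With all-zero weights every
    // projection is zero and silu(0) * 0 = 0, so the output must be a zero
    // vector of length batch * hidden_size.
    #[test]
    fn test_phi3_mlp_forward_zero_weights() {
        let mlp = Phi3MLP {
            gate_proj: vec![0.0; 8 * 4],
            up_proj: vec![0.0; 8 * 4],
            down_proj: vec![0.0; 4 * 8],
            hidden_size: 4,
            intermediate_size: 8,
        };
        let input = vec![1.0; 2 * 4]; // two tokens of width 4
        let output = mlp.forward(&input).unwrap();
        assert_eq!(output.len(), 2 * 4);
        assert!(output.iter().all(|&v| v == 0.0));
    }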
#[test]
fn test_phi3_model_creation() {
let config = Phi3Config::phi3_mini_4k();
let model = Phi3Model::new(&config).unwrap();
assert_eq!(model.layers.len(), 32);
assert_eq!(
model.embed_tokens.len(),
config.vocab_size * config.hidden_size
);
}
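    // Sketch comparing `Phi3MLP::linear` (NEON on aarch64, scalar elsewhere)
    // against a plain reference loop. in_dim = 5 exercises both the 4-wide
    // SIMD body and the scalar tail; the dimensions are hypothetical.
    #[test]
    fn test_phi3_mlp_linear_matches_reference() {
        let mlp = Phi3MLP {
            gate_proj: Vec::new(),
            up_proj: Vec::new(),
            down_proj: Vec::new(),
            hidden_size: 5,
            intermediate_size: 2,
        };
        let input: Vec<f32> = (0..5).map(|i| i as f32 * 0.5).collect();
        let weights: Vec<f32> = (0..10).map(|i| (i as f32 - 4.0) * 0.25).collect();
        let output = mlp.linear(&input, &weights, 5, 2);
        for o in 0..2 {
            let expected: f32 = (0..5).map(|i| input[i] * weights[o * 5 + i]).sum();
            assert!((output[o] - expected).abs() < 1e-5);
        }
    }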
#[test]
fn test_chat_template() {
let messages = vec![
("user".to_string(), "Hello!".to_string()),
("assistant".to_string(), "Hi there!".to_string()),
("user".to_string(), "How are you?".to_string()),
];
let template = Phi3Model::apply_chat_template(&messages);
assert!(template.contains("<|user|>"));
assert!(template.contains("<|assistant|>"));
assert!(template.contains("<|end|>"));
assert!(template.ends_with("<|assistant|>"));
}
}
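    // Sketch of the sliding-window start arithmetic from
    // `Phi3Attention::forward`: a query at absolute position `pos` drops keys
    // before pos - window, with saturating_sub pinning early positions to 0.
    #[test]
    fn test_sliding_window_start() {
        let window = 2048usize;
        assert_eq!(100usize.saturating_sub(window), 0);
        assert_eq!(3000usize.saturating_sub(window), 952);
    }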