#![allow(clippy::disallowed_methods)]
use trueno::{Matrix, Vector};
/// Demo walkthrough of trueno's matrix operations: construction, matmul,
/// transpose, matrix-vector / vector-matrix products, a neural-net linear
/// layer, and batched (3D/4D) multiplication.
///
/// NOTE(review): the original output strings were mojibake (UTF-8 read as
/// Windows-1252: "Ć" for "×", "ā" for "→"/"✓", "⢠" for "•", garbled emoji
/// headers). Glyphs below are reconstructed from those byte patterns; the
/// emoji choices are best-effort and worth confirming against the repo history.
fn main() {
    println!("🧮 Trueno Matrix Operations Demo");
    println!("=================================\n");

    // --- Construction: row-major from_vec, identity, zeros ---
    println!("📐 Matrix Construction");
    println!("----------------------\n");
    let m1 = Matrix::from_vec(2, 3, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
        .expect("Example should not fail");
    println!("Matrix m1 (2×3):");
    print_matrix(&m1);
    let m2 = Matrix::identity(3);
    println!("Identity matrix I₃ (3×3):");
    print_matrix(&m2);
    let m3 = Matrix::zeros(3, 2);
    println!("Zero matrix 0₃ₓ₂ (3×2):");
    print_matrix(&m3);

    // --- Matrix × matrix: (2×3) @ (3×2) -> (2×2) ---
    println!("\n🔢 Matrix Multiplication (matmul)");
    println!("----------------------------------\n");
    let a = Matrix::from_vec(2, 3, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
        .expect("Example should not fail");
    let b = Matrix::from_vec(3, 2, vec![7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
        .expect("Example should not fail");
    println!("Matrix A (2×3):");
    print_matrix(&a);
    println!("Matrix B (3×2):");
    print_matrix(&b);
    let c = a.matmul(&b).expect("Example should not fail");
    println!("A × B (2×2):");
    print_matrix(&c);
    println!("Calculation:");
    println!("  C[0,0] = 1×7 + 2×9 + 3×11 = 58");
    println!("  C[0,1] = 1×8 + 2×10 + 3×12 = 64");
    println!("  C[1,0] = 4×7 + 5×9 + 6×11 = 139");
    println!("  C[1,1] = 4×8 + 5×10 + 6×12 = 154");

    // --- Transpose: swaps rows and columns ---
    println!("\n🔄 Matrix Transpose");
    println!("-------------------\n");
    let m = Matrix::from_vec(2, 3, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
        .expect("Example should not fail");
    println!("Original matrix M (2×3):");
    print_matrix(&m);
    let m_t = m.transpose();
    println!("Transposed M^T (3×2):");
    print_matrix(&m_t);
    println!("Properties:");
    println!("  • Rows and columns swapped: 2×3 → 3×2");
    println!("  • Element M[i,j] becomes M^T[j,i]");
    println!("  • (M^T)^T = M");

    // --- Matrix × column vector: (3×4) @ (4×1) -> (3×1) ---
    println!("\n🎯 Matrix-Vector Multiplication (matvec)");
    println!("-----------------------------------------\n");
    let matrix =
        Matrix::from_vec(3, 4, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
            .expect("Example should not fail");
    let vector = Vector::from_slice(&[1.0, 2.0, 3.0, 4.0]);
    println!("Matrix A (3×4):");
    print_matrix(&matrix);
    println!("Vector v (4×1):");
    print_vector(&vector);
    let result = matrix.matvec(&vector).expect("Example should not fail");
    println!("A × v (3×1):");
    print_vector(&result);
    println!("Calculation:");
    println!("  result[0] = 1×1 + 2×2 + 3×3 + 4×4 = 30");
    println!("  result[1] = 5×1 + 6×2 + 7×3 + 8×4 = 70");
    println!("  result[2] = 9×1 + 10×2 + 11×3 + 12×4 = 110");

    // --- Row vector × matrix: (1×3) @ (3×4) -> (1×4) ---
    println!("\n🎯 Vector-Matrix Multiplication (vecmat)");
    println!("-----------------------------------------\n");
    let matrix2 =
        Matrix::from_vec(3, 4, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
            .expect("Example should not fail");
    let vector2 = Vector::from_slice(&[1.0, 2.0, 3.0]);
    println!("Vector v^T (1×3):");
    print_vector(&vector2);
    println!("Matrix A (3×4):");
    print_matrix(&matrix2);
    let result2 = Matrix::vecmat(&vector2, &matrix2).expect("Example should not fail");
    println!("v^T × A (1×4):");
    print_vector(&result2);
    println!("Calculation:");
    println!("  result[0] = 1×1 + 2×5 + 3×9 = 38");
    println!("  result[1] = 1×2 + 2×6 + 3×10 = 44");
    println!("  result[2] = 1×3 + 2×7 + 3×11 = 50");
    println!("  result[3] = 1×4 + 2×8 + 3×12 = 56");

    // --- Linear layer: y = W·x + b, the core op of a dense NN layer ---
    println!("\n🧠 Real-World Use Case: Neural Network Linear Layer");
    println!("----------------------------------------------------\n");
    let weights = Matrix::from_vec(
        3,
        4,
        vec![
            0.1, 0.2, -0.1, 0.3, // output neuron 0
            -0.2, 0.1, 0.4, -0.1, // output neuron 1
            0.3, -0.1, 0.2, 0.1, // output neuron 2
        ],
    )
    .expect("Example should not fail");
    let input = Vector::from_slice(&[1.0, 2.0, 3.0, 4.0]);
    let bias = Vector::from_slice(&[0.1, -0.1, 0.2]);
    println!("Weight matrix W (3×4):");
    print_matrix(&weights);
    println!("Input vector x (4D):");
    print_vector(&input);
    println!("Bias vector b (3D):");
    print_vector(&bias);
    let wx = weights.matvec(&input).expect("Example should not fail");
    let output = wx.add(&bias).expect("Example should not fail");
    println!("Linear layer output y = W×x + b:");
    print_vector(&output);
    println!("  → This becomes the input to the activation function");
    println!("  → Common activations: ReLU, sigmoid, tanh, softmax");

    // --- Batch: the same layer applied to several one-hot inputs ---
    println!("\n📦 Batch Processing: Multiple Inputs");
    println!("-------------------------------------\n");
    println!("Processing 3 samples through the same linear layer:");
    let samples = [
        Vector::from_slice(&[1.0, 0.0, 0.0, 0.0]),
        Vector::from_slice(&[0.0, 1.0, 0.0, 0.0]),
        Vector::from_slice(&[0.0, 0.0, 1.0, 0.0]),
    ];
    for (i, sample) in samples.iter().enumerate() {
        let wx = weights.matvec(sample).expect("Example should not fail");
        let output = wx.add(&bias).expect("Example should not fail");
        println!("  Sample {}: {:?}", i + 1, output.as_slice());
    }

    // --- Batched 3D matmul over flattened [batch, m, k] / [batch, k, n] data ---
    println!("\n🔢 Batched Matrix Multiplication (3D Tensors)");
    println!("-----------------------------------------------\n");
    let batch = 2;
    let m = 3;
    let k = 4;
    let n = 2;
    let a_data: Vec<f32> = (0..batch * m * k).map(|i| i as f32 * 0.1).collect();
    let b_data: Vec<f32> = (0..batch * k * n).map(|i| (i as f32 + 1.0) * 0.1).collect();
    println!("Shape: [batch={}, m={}, k={}] @ [batch={}, k={}, n={}]", batch, m, k, batch, k, n);
    println!("A data (flattened): {:?}", &a_data[..8]);
    println!("B data (flattened): {:?}", &b_data[..8]);
    let result = Matrix::batched_matmul(&a_data, &b_data, batch, m, k, n)
        .expect("Batched matmul should succeed");
    println!("Output shape: [batch={}, m={}, n={}]", batch, m, n);
    println!("Result (flattened): {:?}", &result[..6]);
    println!("  → Each batch processed independently using SIMD matmul");

    // --- Batched 4D matmul: the Q @ K^T step of multi-head attention ---
    println!("\n🧠 Batched 4D Matrix Multiplication (Attention Pattern)");
    println!("--------------------------------------------------------\n");
    let batch = 1;
    let heads = 2;
    let seq_len = 4;
    let head_dim = 8;
    let q_data: Vec<f32> =
        (0..batch * heads * seq_len * head_dim).map(|i| (i as f32 * 0.01).sin()).collect();
    let kt_data: Vec<f32> =
        (0..batch * heads * head_dim * seq_len).map(|i| (i as f32 * 0.02).cos()).collect();
    println!("Multi-head attention pattern: Q @ K^T");
    println!(
        "  Q shape: [batch={}, heads={}, seq={}, head_dim={}]",
        batch, heads, seq_len, head_dim
    );
    println!(
        "  K^T shape: [batch={}, heads={}, head_dim={}, seq={}]",
        batch, heads, head_dim, seq_len
    );
    let attn_scores =
        Matrix::batched_matmul_4d(&q_data, &kt_data, batch, heads, seq_len, head_dim, seq_len)
            .expect("4D batched matmul should succeed");
    println!(
        "  Output shape: [batch={}, heads={}, seq={}, seq={}]",
        batch, heads, seq_len, seq_len
    );
    println!("  Attention scores (first 8): {:?}", &attn_scores[..8]);
    println!("  → Used for transformer attention: softmax(Q @ K^T / sqrt(d)) @ V");

    // --- Sanity checks: identity, transpose duality, annihilation by zero ---
    println!("\n✅ Verified Mathematical Properties");
    println!("------------------------------------\n");
    let test_m = Matrix::from_vec(2, 2, vec![1.0, 2.0, 3.0, 4.0]).expect("Example should not fail");
    let test_v = Vector::from_slice(&[5.0, 6.0]);
    let identity = Matrix::identity(2);
    let iv = identity.matvec(&test_v).expect("Example should not fail");
    assert_eq!(iv.as_slice(), test_v.as_slice());
    println!("✓ Identity: I×v = v");
    let av = test_m.matvec(&test_v).expect("Example should not fail");
    let m_t = test_m.transpose();
    let v_mt = Matrix::vecmat(&test_v, &m_t).expect("Example should not fail");
    assert_eq!(av.as_slice(), v_mt.as_slice());
    println!("✓ Transpose: (A×v)^T = v^T×A^T");
    let zero_v = Vector::from_slice(&[0.0, 0.0]);
    let result = test_m.matvec(&zero_v).expect("Example should not fail");
    assert_eq!(result.as_slice(), &[0.0, 0.0]);
    println!("✓ Zero: A×0 = 0");
    println!("\n🎉 All matrix operations working correctly!");
    println!("\n📚 For more examples, see:");
    println!("  • examples/activation_functions.rs - Neural network activations");
    println!("  • examples/ml_similarity.rs - ML vector operations");
    println!("  • examples/performance_demo.rs - SIMD performance");
}
/// Print each row of `m` as an indented, bracketed list of values,
/// right-aligned to width 6 with one decimal place.
fn print_matrix(m: &Matrix<f32>) {
    let (rows, cols) = m.shape();
    for r in 0..rows {
        let cells: Vec<String> = (0..cols)
            .map(|c| format!("{:6.1}", m.get(r, c).expect("Example should not fail")))
            .collect();
        println!("  [{}]", cells.join(", "));
    }
}
/// Print `v` as a single indented, bracketed list of values,
/// right-aligned to width 6 with one decimal place.
fn print_vector(v: &Vector<f32>) {
    let cells: Vec<String> = v.as_slice().iter().map(|x| format!("{:6.1}", x)).collect();
    println!("  [{}]", cells.join(", "));
}