llama-gguf 0.14.0

A high-performance Rust implementation of llama.cpp - LLM inference engine with full GGUF support
Documentation
#include <metal_stdlib>
using namespace metal;

// Host-provided parameters for the softmax normalization pass.
// The host computes the reciprocal of the softmax denominator once,
// so each GPU thread multiplies instead of performing a (slower) divide.
struct SoftmaxDivParams {
    int n;          // number of valid elements in the data buffer
    float inv_sum;  // 1.0 / sum(exp(x_i)), precomputed on the host
};

// Final softmax step: scale every element by the precomputed reciprocal
// of the sum, turning the exponentiated values into probabilities.
// One thread handles one element; threads past the end do nothing.
kernel void softmax_div_f32(
    device float* data [[buffer(0)]],
    constant SoftmaxDivParams& params [[buffer(1)]],
    uint idx [[thread_position_in_grid]]
) {
    // Guard against the grid being rounded up past the element count.
    const uint count = uint(params.n);
    if (idx >= count) {
        return;
    }
    // Multiply by 1/sum instead of dividing by sum (same result, cheaper op).
    data[idx] *= params.inv_sum;
}