llama-gguf 0.14.0

A high-performance Rust implementation of llama.cpp - LLM inference engine with full GGUF support
Documentation
(source listing follows)
#include <metal_stdlib>
using namespace metal;

// Uniform parameters for scale_f32, bound at [[buffer(2)]].
// Host-side layout must match this struct exactly (int + float, 8 bytes).
struct ScaleParams {
    int n;        // number of valid elements in the input/output buffers
    float scalar; // multiplier applied to every element
};

// Elementwise scale: result[i] = a[i] * params.scalar for i in [0, params.n).
// Launch as a 1D grid with at least params.n threads; excess threads exit
// early, so the dispatch size may be rounded up to a threadgroup multiple.
kernel void scale_f32(
    device const float* a [[buffer(0)]],
    device float* result [[buffer(1)]],
    constant ScaleParams& params [[buffer(2)]],
    uint idx [[thread_position_in_grid]]
) {
    // Guard the grid tail: threads past the element count do nothing.
    const uint count = uint(params.n);
    if (idx >= count) {
        return;
    }
    result[idx] = a[idx] * params.scalar;
}