llama-gguf 0.14.0

A high-performance Rust implementation of llama.cpp - LLM inference engine with full GGUF support
Documentation
#version 450

// Element-wise vector addition compute shader: result[i] = a[i] + b[i]
// for i in [0, n). Dispatch with ceil(n / 256.0) workgroups on x.
layout(local_size_x = 256) in;

// Storage buffers: two read-only inputs, one write-only output.
// All three are expected to hold at least n floats.
layout(set = 0, binding = 0) readonly buffer InputA { float a[]; };
layout(set = 0, binding = 1) readonly buffer InputB { float b[]; };
layout(set = 0, binding = 2) writeonly buffer Output { float result[]; };

// Push constants: n is the element count. Assumed non-negative —
// a negative value would wrap to a huge uint in the bounds check below.
layout(push_constant) uniform Params {
    int n;
};

void main() {
    uint idx = gl_GlobalInvocationID.x;
    // Explicit uint cast instead of relying on GLSL's implicit int->uint
    // conversion in the mixed-sign comparison. Invocations past the end
    // of the array (last partial workgroup) simply do nothing.
    if (idx < uint(n)) {
        result[idx] = a[idx] + b[idx];
    }
}