llama_cpp_sys 0.3.2

Automatically generated bindings to llama.cpp's C API.

Documentation excerpt — the following five lines are a CUDA header snippet from llama.cpp:
#include "common.cuh"

// Thread-block size for the upscale kernel launch.
// NOTE(review): the launch itself is not visible in this header — confirm
// against the corresponding .cu implementation that this is used as blockDim.
#define CUDA_UPSCALE_BLOCK_SIZE 256

// Host-side entry point for the GGML UPSCALE operation on the CUDA backend.
// Declaration only; the kernel and launch logic live in the matching .cu file.
// NOTE(review): by the ggml_cuda_op_* convention this presumably reads the
// input tensor(s) from dst->src and writes the upscaled result into dst on
// ctx's stream — verify against the implementation, which is not shown here.
void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst);