llama-cpp-sys-4 0.2.46

Low-level bindings to llama.cpp.

Documentation — source listing of the ggml AMX backend header (amx.h):
#pragma once
#include "common.h"

// ggml AMX backend interface (Intel Advanced Matrix Extensions).
// NOTE(review): only prototypes are visible here; the semantics below are
// inferred from names and the ggml backend conventions — confirm against
// the implementation (amx.cpp) before relying on them.

// Returns the scratch/workspace size in bytes that the AMX backend wants
// for computing `dst` — presumably queried before ggml_backend_amx_mul_mat;
// TODO confirm against the implementation.
size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst);

// Returns the backing-buffer allocation size in bytes for `tensor` — likely
// larger than the nominal tensor size when weights are repacked into an
// AMX-friendly layout; verify against the buffer-type implementation.
size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);

// Writes `size` bytes of weight data (starting at `offset` within the
// tensor's data) into `tensor`, presumably converting/repacking it into the
// AMX tile layout — TODO confirm exact layout with the implementation.
void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);

// Computes a matrix multiplication into `dst` using AMX instructions.
// `params` is the per-thread compute context (thread index/count, scratch
// buffer) shared by all ggml compute kernels.
void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);