Build failed.
# rustc version
rustc 1.90.0-nightly (855e0fe46 2025-07-11)

# docs.rs version

docsrs 0.6.0 (c1135d19 2025-07-02)

# build log
[INFO] running `Command { std: "docker" "create" "-v" "/home/cratesfyi/workspace-builder/builds/llama_cpp_rs-0.3.0/target:/opt/rustwide/target:rw,Z" "-v" "/home/cratesfyi/workspace-builder/builds/llama_cpp_rs-0.3.0/source:/opt/rustwide/workdir:ro,Z" "-v" "/home/cratesfyi/workspace-builder/cargo-home:/opt/rustwide/cargo-home:ro,Z" "-v" "/home/cratesfyi/workspace-builder/rustup-home:/opt/rustwide/rustup-home:ro,Z" "-e" "SOURCE_DIR=/opt/rustwide/workdir" "-e" "CARGO_TARGET_DIR=/opt/rustwide/target" "-e" "DOCS_RS=1" "-e" "CARGO_HOME=/opt/rustwide/cargo-home" "-e" "RUSTUP_HOME=/opt/rustwide/rustup-home" "-w" "/opt/rustwide/workdir" "-m" "6442450944" "--cpus" "6" "--user" "1001:1001" "--network" "none" "ghcr.io/rust-lang/crates-build-env/linux@sha256:90999bfc7ae267e83380e433d8e61a7c072ca6729e92edbae886d3423b3a6f4c" "/opt/rustwide/cargo-home/bin/cargo" "+nightly" "rustdoc" "--lib" "-Zrustdoc-map" "--config" "build.rustdocflags=[\"--cfg\", \"docsrs\", \"-Z\", \"unstable-options\", \"--emit=invocation-specific\", \"--resource-suffix\", \"-20250711-1.90.0-nightly-855e0fe46\", \"--static-root-path\", \"/-/rustdoc.static/\", \"--cap-lints\", \"warn\", \"--extern-html-root-takes-precedence\"]" "--offline" "-Zunstable-options" "--config=doc.extern-map.registries.crates-io=\"https://docs.rs/{pkg_name}/{version}/x86_64-unknown-linux-gnu\"" "-Zrustdoc-scrape-examples" "-j6" "--target" "x86_64-unknown-linux-gnu", kill_on_drop: false }`
[INFO] [stderr] WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.
[INFO] [stdout] 507910b74a1f89a33612341e2b46f865422a0387331ee38f54357e1ae7a97496
[INFO] running `Command { std: "docker" "start" "-a" "507910b74a1f89a33612341e2b46f865422a0387331ee38f54357e1ae7a97496", kill_on_drop: false }`
[INFO] [stderr] warning: target filter specified, but no targets matched; this is a no-op
[INFO] [stderr] Compiling llama_cpp_rs v0.3.0 (/opt/rustwide/workdir)
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/ggml.c:19672:13: warning: 'ggml_opt_get_grad' defined but not used [-Wunused-function]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 19672 | static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/k_quants.c:205:14: warning: 'make_qkx1_quants' defined but not used [-Wunused-function]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 205 | static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp: In function 'int get_embeddings(void*, void*, float*)':
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:80:23: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 80 | if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: In file included from ./llama.cpp/common/common.h:5,
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: from ./binding.cpp:1:
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp: In function 'int eval(void*, void*, char*)':
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:138:22: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 138 | return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past);
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp: In function 'int llama_predict(void*, void*, char*, bool)':
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:282:19: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 282 | llama_eval(ctx, tmp, 1, 0);
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~^~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:353:31: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 353 | if (llama_eval(ctx, &embd[i], n_eval, n_past))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:440:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 440 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:446:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 446 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:456:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 456 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:475:42: warning: cast from type 'const char*' to type 'char*' casts away qualifiers [-Wcast-qual]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 475 | if (!tokenCallback(state_pr, (char*)token_str.c_str()))
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp: In function 'void* llama_allocate_params(const char*, int, int, int, int, float, float, float, int, bool, bool, int, int, const char**, int, float, float, float, float, int, float, float, bool, const char*, const char*, bool, bool, bool, const char*, const char*, bool)':
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:629:100: warning: unused parameter 'ignore_eos' [-Wunused-parameter]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 629 | float top_p, float temp, float repeat_penalty, int repeat_last_n, bool ignore_eos, bool memory_f16, int n_batch, int n_keep, const char **antiprompt, int antiprompt_count,
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~^~~~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp: In function 'void* load_model(const char*, int, int, bool, bool, bool, bool, bool, bool, int, int, const char*, const char*, bool)':
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ./binding.cpp:708:122: warning: unused parameter 'low_vram' [-Wunused-parameter]
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: 708 | void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, bool vocab_only, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa)
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: | ~~~~~^~~~~~~~
[INFO] [stderr] warning: llama_cpp_rs@0.3.0: ar: /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/llama.cpp/ggml.o: No such file or directory
[INFO] [stderr] error: failed to run custom build command for `llama_cpp_rs v0.3.0 (/opt/rustwide/workdir)`
[INFO] [stderr]
[INFO] [stderr] Caused by:
[INFO] [stderr] process didn't exit successfully: `/opt/rustwide/target/debug/build/llama_cpp_rs-b54b60d96f7e33e3/build-script-build` (exit status: 1)
[INFO] [stderr] --- stdout
[INFO] [stderr] cargo:rerun-if-env-changed=TARGET
[INFO] [stderr] cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_x86_64-unknown-linux-gnu
[INFO] [stderr] cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_linux_gnu
[INFO] [stderr] cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS
[INFO] [stderr] cargo:rerun-if-changed=/usr/lib/llvm-18/lib/clang/18/include/stdbool.h
[INFO] [stderr] OUT_DIR = Some(/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out)
[INFO] [stderr] OPT_LEVEL = Some(0)
[INFO] [stderr] TARGET = Some(x86_64-unknown-linux-gnu)
[INFO] [stderr] HOST = Some(x86_64-unknown-linux-gnu)
[INFO] [stderr] cargo:rerun-if-env-changed=CC_x86_64-unknown-linux-gnu
[INFO] [stderr] CC_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=CC_x86_64_unknown_linux_gnu
[INFO] [stderr] CC_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_CC
[INFO] [stderr] HOST_CC = None
[INFO] [stderr] cargo:rerun-if-env-changed=CC
[INFO] [stderr] CC = None
[INFO] [stderr] cargo:rerun-if-env-changed=CC_ENABLE_DEBUG_OUTPUT
[INFO] [stderr] RUSTC_WRAPPER = None
[INFO] [stderr] cargo:rerun-if-env-changed=CRATE_CC_NO_DEFAULTS
[INFO] [stderr] CRATE_CC_NO_DEFAULTS = None
[INFO] [stderr] DEBUG = Some(true)
[INFO] [stderr] CARGO_CFG_TARGET_FEATURE = Some(fxsr,sse,sse2,x87)
[INFO] [stderr] cargo:rerun-if-env-changed=CFLAGS
[INFO] [stderr] CFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_CFLAGS
[INFO] [stderr] HOST_CFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=CFLAGS_x86_64_unknown_linux_gnu
[INFO] [stderr] CFLAGS_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=CFLAGS_x86_64-unknown-linux-gnu
[INFO] [stderr] CFLAGS_x86_64-unknown-linux-gnu = None
[INFO] [stderr] CARGO_ENCODED_RUSTFLAGS = Some()
[INFO] [stderr] cargo:warning=./llama.cpp/ggml.c:19672:13: warning: 'ggml_opt_get_grad' defined but not used [-Wunused-function]
[INFO] [stderr] cargo:warning=19672 | static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/k_quants.c:205:14: warning: 'make_qkx1_quants' defined but not used [-Wunused-function]
[INFO] [stderr] cargo:warning= 205 | static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:rerun-if-env-changed=AR_x86_64-unknown-linux-gnu
[INFO] [stderr] AR_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=AR_x86_64_unknown_linux_gnu
[INFO] [stderr] AR_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_AR
[INFO] [stderr] HOST_AR = None
[INFO] [stderr] cargo:rerun-if-env-changed=AR
[INFO] [stderr] AR = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS
[INFO] [stderr] ARFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_ARFLAGS
[INFO] [stderr] HOST_ARFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS_x86_64_unknown_linux_gnu
[INFO] [stderr] ARFLAGS_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS_x86_64-unknown-linux-gnu
[INFO] [stderr] ARFLAGS_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:rustc-link-lib=static=ggml
[INFO] [stderr] cargo:rustc-link-search=native=/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out
[INFO] [stderr] OUT_DIR = Some(/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out)
[INFO] [stderr] OPT_LEVEL = Some(0)
[INFO] [stderr] TARGET = Some(x86_64-unknown-linux-gnu)
[INFO] [stderr] HOST = Some(x86_64-unknown-linux-gnu)
[INFO] [stderr] cargo:rerun-if-env-changed=CXX_x86_64-unknown-linux-gnu
[INFO] [stderr] CXX_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=CXX_x86_64_unknown_linux_gnu
[INFO] [stderr] CXX_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_CXX
[INFO] [stderr] HOST_CXX = None
[INFO] [stderr] cargo:rerun-if-env-changed=CXX
[INFO] [stderr] CXX = None
[INFO] [stderr] cargo:rerun-if-env-changed=CC_ENABLE_DEBUG_OUTPUT
[INFO] [stderr] RUSTC_WRAPPER = None
[INFO] [stderr] cargo:rerun-if-env-changed=CRATE_CC_NO_DEFAULTS
[INFO] [stderr] CRATE_CC_NO_DEFAULTS = None
[INFO] [stderr] DEBUG = Some(true)
[INFO] [stderr] CARGO_CFG_TARGET_FEATURE = Some(fxsr,sse,sse2,x87)
[INFO] [stderr] cargo:rerun-if-env-changed=CXXFLAGS
[INFO] [stderr] CXXFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_CXXFLAGS
[INFO] [stderr] HOST_CXXFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=CXXFLAGS_x86_64_unknown_linux_gnu
[INFO] [stderr] CXXFLAGS_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=CXXFLAGS_x86_64-unknown-linux-gnu
[INFO] [stderr] CXXFLAGS_x86_64-unknown-linux-gnu = None
[INFO] [stderr] CARGO_ENCODED_RUSTFLAGS = Some()
[INFO] [stderr] cargo:warning=./binding.cpp: In function 'int get_embeddings(void*, void*, float*)':
[INFO] [stderr] cargo:warning=./binding.cpp:80:23: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 80 | if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past))
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=In file included from ./llama.cpp/common/common.h:5,
[INFO] [stderr] cargo:warning= from ./binding.cpp:1:
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] cargo:warning= 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp: In function 'int eval(void*, void*, char*)':
[INFO] [stderr] cargo:warning=./binding.cpp:138:22: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 138 | return llama_eval(ctx, tokens.data(), n_prompt_tokens, n_past);
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] cargo:warning= 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp: In function 'int llama_predict(void*, void*, char*, bool)':
[INFO] [stderr] cargo:warning=./binding.cpp:282:19: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 282 | llama_eval(ctx, tmp, 1, 0);
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~^~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] cargo:warning= 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp:353:31: warning: 'int llama_eval(llama_context*, llama_token*, int32_t, int)' is deprecated: use llama_decode() instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 353 | if (llama_eval(ctx, &embd[i], n_eval, n_past))
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:423:30: note: declared here
[INFO] [stderr] cargo:warning= 423 | LLAMA_API DEPRECATED(int llama_eval(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp:440:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 440 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] cargo:warning= 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp:446:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 446 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] cargo:warning= 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp:456:49: warning: 'void llama_sample_temperature(llama_context*, llama_token_data_array*, float)' is deprecated: use llama_sample_temp instead [-Wdeprecated-declarations]
[INFO] [stderr] cargo:warning= 456 | llama_sample_temperature(ctx, &candidates_p, temp);
[INFO] [stderr] cargo:warning= | ~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:621:31: note: declared here
[INFO] [stderr] cargo:warning= 621 | LLAMA_API DEPRECATED(void llama_sample_temperature(
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./llama.cpp/llama.h:31:36: note: in definition of macro 'DEPRECATED'
[INFO] [stderr] cargo:warning= 31 | # define DEPRECATED(func, hint) func __attribute__((deprecated(hint)))
[INFO] [stderr] cargo:warning= | ^~~~
[INFO] [stderr] cargo:warning=./binding.cpp:475:42: warning: cast from type 'const char*' to type 'char*' casts away qualifiers [-Wcast-qual]
[INFO] [stderr] cargo:warning= 475 | if (!tokenCallback(state_pr, (char*)token_str.c_str()))
[INFO] [stderr] cargo:warning= | ^~~~~~~~~~~~~~~~~~~~~~~~
[INFO] [stderr] cargo:warning=./binding.cpp: In function 'void* llama_allocate_params(const char*, int, int, int, int, float, float, float, int, bool, bool, int, int, const char**, int, float, float, float, float, int, float, float, bool, const char*, const char*, bool, bool, bool, const char*, const char*, bool)':
[INFO] [stderr] cargo:warning=./binding.cpp:629:100: warning: unused parameter 'ignore_eos' [-Wunused-parameter]
[INFO] [stderr] cargo:warning= 629 | float top_p, float temp, float repeat_penalty, int repeat_last_n, bool ignore_eos, bool memory_f16, int n_batch, int n_keep, const char **antiprompt, int antiprompt_count,
[INFO] [stderr] cargo:warning= | ~~~~~^~~~~~~~~~
[INFO] [stderr] cargo:warning=./binding.cpp: In function 'void* load_model(const char*, int, int, bool, bool, bool, bool, bool, bool, int, int, const char*, const char*, bool)':
[INFO] [stderr] cargo:warning=./binding.cpp:708:122: warning: unused parameter 'low_vram' [-Wunused-parameter]
[INFO] [stderr] cargo:warning= 708 | void *load_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, bool vocab_only, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa)
[INFO] [stderr] cargo:warning= | ~~~~~^~~~~~~~
[INFO] [stderr] cargo:rerun-if-env-changed=AR_x86_64-unknown-linux-gnu
[INFO] [stderr] AR_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=AR_x86_64_unknown_linux_gnu
[INFO] [stderr] AR_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_AR
[INFO] [stderr] HOST_AR = None
[INFO] [stderr] cargo:rerun-if-env-changed=AR
[INFO] [stderr] AR = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS
[INFO] [stderr] ARFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=HOST_ARFLAGS
[INFO] [stderr] HOST_ARFLAGS = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS_x86_64_unknown_linux_gnu
[INFO] [stderr] ARFLAGS_x86_64_unknown_linux_gnu = None
[INFO] [stderr] cargo:rerun-if-env-changed=ARFLAGS_x86_64-unknown-linux-gnu
[INFO] [stderr] ARFLAGS_x86_64-unknown-linux-gnu = None
[INFO] [stderr] cargo:warning=ar: /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/llama.cpp/ggml.o: No such file or directory
[INFO] [stderr]
[INFO] [stderr] --- stderr
[INFO] [stderr]
[INFO] [stderr]
[INFO] [stderr] error occurred in cc-rs: command did not execute successfully (status code exit status: 1): ZERO_AR_DATE="1" "ar" "cq" "/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/libbinding.a" "/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/7db155ec2d396663-common.o" "/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/f326362f9e96224c-llama.o" "/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/c854047367d9b492-binding.o" "/opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama_cpp_rs-33ac1afc6ab3d133/out/llama.cpp/ggml.o"
[INFO] [stderr]
[INFO] [stderr]
[INFO] running `Command { std: "docker" "inspect" "507910b74a1f89a33612341e2b46f865422a0387331ee38f54357e1ae7a97496", kill_on_drop: false }`
[INFO] running `Command { std: "docker" "rm" "-f" "507910b74a1f89a33612341e2b46f865422a0387331ee38f54357e1ae7a97496", kill_on_drop: false }`
[INFO] [stdout] 507910b74a1f89a33612341e2b46f865422a0387331ee38f54357e1ae7a97496