Build #1614804 2025-01-09T22:33:46.994306+00:00
# rustc version
rustc 1.86.0-nightly (a580b5c37 2025-01-08)
# docs.rs version
docsrs 0.6.0 (9368f26f 2025-01-09)
# build log
[INFO] running `Command { std: "docker" "create" "-v" "/home/cratesfyi/workspace/builds/llama-cpp-sys-4-0.1.91/target:/opt/rustwide/target:rw,Z" "-v" "/home/cratesfyi/workspace/builds/llama-cpp-sys-4-0.1.91/source:/opt/rustwide/workdir:ro,Z" "-v" "/home/cratesfyi/workspace/cargo-home:/opt/rustwide/cargo-home:ro,Z" "-v" "/home/cratesfyi/workspace/rustup-home:/opt/rustwide/rustup-home:ro,Z" "-e" "SOURCE_DIR=/opt/rustwide/workdir" "-e" "CARGO_TARGET_DIR=/opt/rustwide/target" "-e" "DOCS_RS=1" "-e" "CARGO_HOME=/opt/rustwide/cargo-home" "-e" "RUSTUP_HOME=/opt/rustwide/rustup-home" "-w" "/opt/rustwide/workdir" "-m" "6442450944" "--cpus" "6" "--user" "1001:1001" "--network" "none" "ghcr.io/rust-lang/crates-build-env/linux@sha256:f3a9d4ad9d972b27faf3965f35b62e55ba32bbce8f20bc8fe909558a86702fde" "/opt/rustwide/cargo-home/bin/cargo" "+nightly" "rustdoc" "--lib" "-Zrustdoc-map" "--config" "build.rustdocflags=[\"--cfg\", \"docsrs\", \"-Z\", \"unstable-options\", \"--emit=invocation-specific\", \"--resource-suffix\", \"-20250108-1.86.0-nightly-a580b5c37\", \"--static-root-path\", \"/-/rustdoc.static/\", \"--cap-lints\", \"warn\", \"--extern-html-root-takes-precedence\"]" "--offline" "-Zunstable-options" "--config=doc.extern-map.registries.crates-io=\"https://docs.rs/{pkg_name}/{version}/x86_64-unknown-linux-gnu\"" "-Zrustdoc-scrape-examples" "-j6" "--target" "x86_64-unknown-linux-gnu", kill_on_drop: false }`
[INFO] [stdout] a3d93051cce0de1fa1abc1c20bfb0c81de3c4a54f30f081e5daee87d18f01b28
[INFO] [stderr] WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.
[INFO] running `Command { std: "docker" "start" "-a" "a3d93051cce0de1fa1abc1c20bfb0c81de3c4a54f30f081e5daee87d18f01b28", kill_on_drop: false }`
[INFO] [stderr] warning: target filter specified, but no targets matched; this is a no-op
[INFO] [stderr] Compiling llama-cpp-sys-4 v0.1.91 (/opt/rustwide/workdir)
[INFO] [stderr] Documenting llama-cpp-sys-4 v0.1.91 (/opt/rustwide/workdir)
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4293:13
[INFO] [stderr] |
[INFO] [stderr] 4293 | ... = " Apply chat template. Inspired by hf apply_chat_template() on python.\n Both \"model\" and \"custom_template\" are optional, but at least one is required. \"custom_template\" has higher precedence than \"model\"\n NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template\n @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.\n @param chat Pointer to a list of multiple llama_chat_message\n @param n_msg Number of llama_chat_message in this chat\n @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.\n @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)\n @param length The size of the allocated buffer\n @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] = note: `#[warn(rustdoc::bare_urls)]` on by default
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4293 | #[doc = <" Apply chat template. Inspired by hf apply_chat_template() on python.\n Both \"model\" and \"custom_template\" are optional, but at least one is required. \"custom_template\" has higher precedence than \"model\"\n NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template\n @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.\n @param chat Pointer to a list of multiple llama_chat_message\n @param n_msg Number of llama_chat_message in this chat\n @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.\n @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)\n @param length The size of the allocated buffer\n @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template.">]
[INFO] [stderr] | + +
[INFO] [stderr]
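This warning and the nine similar ones that follow all have the same shape: the Doxygen comments from llama.cpp's headers are copied verbatim into the generated `bindings.rs` (presumably by bindgen in the crate's build script), and rustdoc's `rustdoc::bare_urls` lint flags the raw `https://` links in those `#[doc]` attributes. Because the flagged file lives under `OUT_DIR` and is regenerated on every build, a common way to quiet the lint is to allow it at the point where the bindings are pulled in rather than editing the generated file. A minimal sketch, assuming the crate includes the bindings via `include!` (the log does not show how it actually does this):

```rust
// Hypothetical lib.rs fragment: wrap the generated bindings in a module
// and allow rustdoc's bare-URL lint there, so the Doxygen comments copied
// from llama.h no longer produce warnings.
#[allow(rustdoc::bare_urls)]
mod bindings {
    include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
}

// Re-export so downstream users keep the flat `llama_cpp_sys_4::*` API.
pub use bindings::*;
```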
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4400:13
[INFO] [stderr] |
[INFO] [stderr] 4400 | ... = " @details Top-K sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.0975...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4400 | #[doc = <" @details Top-K sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4404:13
[INFO] [stderr] |
[INFO] [stderr] 4404 | ... = " @details Nucleus sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.0975...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4404 | #[doc = <" @details Nucleus sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4408:13
[INFO] [stderr] |
[INFO] [stderr] 4408 | #[doc = " @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841"]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4408 | #[doc = <" @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4412:13
[INFO] [stderr] |
[INFO] [stderr] 4412 | #[doc = " @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666."]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4412 | #[doc = <" @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4420:13
[INFO] [stderr] |
[INFO] [stderr] 4420 | #[doc = " @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772."]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4420 | #[doc = <" @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4424:13
[INFO] [stderr] |
[INFO] [stderr] 4424 | #[doc = " @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335"]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4424 | #[doc = <" @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4429:13
[INFO] [stderr] |
[INFO] [stderr] 4429 | ... = " @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4429 | #[doc = <" @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4439:13
[INFO] [stderr] |
[INFO] [stderr] 4439 | ... = " @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4439 | #[doc = <" @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-4-fcce2f2f70b6ab6c/out/bindings.rs:4459:13
[INFO] [stderr] |
[INFO] [stderr] 4459 | ... = " @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/98...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 4459 | #[doc = <" @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: `llama-cpp-sys-4` (lib doc) generated 10 warnings (run `cargo fix --lib -p llama-cpp-sys-4` to apply 10 suggestions)
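The `cargo fix` suggestion is of limited use here: the flagged `#[doc]` attributes sit in a build-script output under `target/.../out/`, so any applied edits would be overwritten whenever the build script regenerates `bindings.rs`. Adjusting the lint level (or the bindings generator), as sketched above, is the durable route.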
[INFO] [stderr] Finished `dev` profile [unoptimized + debuginfo] target(s) in 3.09s
[INFO] [stderr] Generated /opt/rustwide/target/x86_64-unknown-linux-gnu/doc/llama_cpp_sys_4/index.html
[INFO] running `Command { std: "docker" "inspect" "a3d93051cce0de1fa1abc1c20bfb0c81de3c4a54f30f081e5daee87d18f01b28", kill_on_drop: false }`
[INFO] running `Command { std: "docker" "rm" "-f" "a3d93051cce0de1fa1abc1c20bfb0c81de3c4a54f30f081e5daee87d18f01b28", kill_on_drop: false }`
[INFO] [stdout] a3d93051cce0de1fa1abc1c20bfb0c81de3c4a54f30f081e5daee87d18f01b28