Build #1690872 2025-01-30T18:28:23.794842+00:00
# rustc version
rustc 1.86.0-nightly (ae5de6c75 2025-01-29)
# docs.rs version
docsrs 0.6.0 (29a0e81b 2025-01-22)
# build log
[INFO] running `Command { std: "docker" "create" "-v" "/home/cratesfyi/workspace/builds/llama-cpp-sys-2-0.1.91/target:/opt/rustwide/target:rw,Z" "-v" "/home/cratesfyi/workspace/builds/llama-cpp-sys-2-0.1.91/source:/opt/rustwide/workdir:ro,Z" "-v" "/home/cratesfyi/workspace/cargo-home:/opt/rustwide/cargo-home:ro,Z" "-v" "/home/cratesfyi/workspace/rustup-home:/opt/rustwide/rustup-home:ro,Z" "-e" "SOURCE_DIR=/opt/rustwide/workdir" "-e" "CARGO_TARGET_DIR=/opt/rustwide/target" "-e" "DOCS_RS=1" "-e" "CARGO_HOME=/opt/rustwide/cargo-home" "-e" "RUSTUP_HOME=/opt/rustwide/rustup-home" "-w" "/opt/rustwide/workdir" "-m" "6442450944" "--cpus" "6" "--user" "1001:1001" "--network" "none" "ghcr.io/rust-lang/crates-build-env/linux@sha256:c80049f3b88b82089a44e0f06d0d6029d44b96b7257e55a1cd63dbc9f4c33334" "/opt/rustwide/cargo-home/bin/cargo" "+nightly" "rustdoc" "--lib" "-Zrustdoc-map" "--config" "build.rustdocflags=[\"--cfg\", \"docsrs\", \"-Z\", \"unstable-options\", \"--emit=invocation-specific\", \"--resource-suffix\", \"-20250129-1.86.0-nightly-ae5de6c75\", \"--static-root-path\", \"/-/rustdoc.static/\", \"--cap-lints\", \"warn\", \"--extern-html-root-takes-precedence\"]" "--offline" "-Zunstable-options" "--config=doc.extern-map.registries.crates-io=\"https://docs.rs/{pkg_name}/{version}/x86_64-unknown-linux-gnu\"" "-Zrustdoc-scrape-examples" "-j6" "--target" "x86_64-unknown-linux-gnu", kill_on_drop: false }`
[INFO] [stderr] WARNING: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.
[INFO] [stdout] 14337c64020ef5719a06f261c1e5982323709cc1cb81c7f69d54550629caf499
[INFO] running `Command { std: "docker" "start" "-a" "14337c64020ef5719a06f261c1e5982323709cc1cb81c7f69d54550629caf499", kill_on_drop: false }`
[INFO] [stderr] warning: target filter specified, but no targets matched; this is a no-op
[INFO] [stderr] Compiling llama-cpp-sys-2 v0.1.91 (/opt/rustwide/workdir)
[INFO] [stderr] Documenting llama-cpp-sys-2 v0.1.91 (/opt/rustwide/workdir)
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6098:13
[INFO] [stderr] |
[INFO] [stderr] 6098 | ... = " Apply chat template. Inspired by hf apply_chat_template() on python.\n Both \"model\" and \"custom_template\" are optional, but at least one is required. \"custom_template\" has higher precedence than \"model\"\n NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template\n @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.\n @param chat Pointer to a list of multiple llama_chat_message\n @param n_msg Number of llama_chat_message in this chat\n @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.\n @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)\n @param length The size of the allocated buffer\n @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] = note: `#[warn(rustdoc::bare_urls)]` on by default
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6098 | #[doc = <" Apply chat template. Inspired by hf apply_chat_template() on python.\n Both \"model\" and \"custom_template\" are optional, but at least one is required. \"custom_template\" has higher precedence than \"model\"\n NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template\n @param tmpl A Jinja template to use for this chat. If this is nullptr, the model’s default chat template will be used instead.\n @param chat Pointer to a list of multiple llama_chat_message\n @param n_msg Number of llama_chat_message in this chat\n @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.\n @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)\n @param length The size of the allocated buffer\n @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6291:13
[INFO] [stderr] |
[INFO] [stderr] 6291 | ... = " @details Top-K sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.0975...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6291 | #[doc = <" @details Top-K sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6295:13
[INFO] [stderr] |
[INFO] [stderr] 6295 | ... = " @details Nucleus sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.0975...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6295 | #[doc = <" @details Nucleus sampling described in academic paper \"The Curious Case of Neural Text Degeneration\" https://arxiv.org/abs/1904.09751">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6299:13
[INFO] [stderr] |
[INFO] [stderr] 6299 | #[doc = " @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841"]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6299 | #[doc = <" @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6303:13
[INFO] [stderr] |
[INFO] [stderr] 6303 | #[doc = " @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666."]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6303 | #[doc = <" @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6311:13
[INFO] [stderr] |
[INFO] [stderr] 6311 | #[doc = " @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772."]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6311 | #[doc = <" @details Dynamic temperature implementation (a.k.a. entropy) described in the paper https://arxiv.org/abs/2309.02772.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6315:13
[INFO] [stderr] |
[INFO] [stderr] 6315 | #[doc = " @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335"]
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6315 | #[doc = <" @details XTC sampler as described in https://github.com/oobabooga/text-generation-webui/pull/6335">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6320:13
[INFO] [stderr] |
[INFO] [stderr] 6320 | ... = " @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6320 | #[doc = <" @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6330:13
[INFO] [stderr] |
[INFO] [stderr] 6330 | ... = " @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6330 | #[doc = <" @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.\n @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.\n @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.\n @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.\n @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: this URL is not a hyperlink
[INFO] [stderr] --> /opt/rustwide/target/x86_64-unknown-linux-gnu/debug/build/llama-cpp-sys-2-fd0c83dde9f41e2b/out/bindings.rs:6350:13
[INFO] [stderr] |
[INFO] [stderr] 6350 | ... = " @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/98...
[INFO] [stderr] | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
[INFO] [stderr] |
[INFO] [stderr] = note: bare URLs are not automatically turned into clickable links
[INFO] [stderr] help: use an automatic link instead
[INFO] [stderr] |
[INFO] [stderr] 6350 | #[doc = <" @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982">]
[INFO] [stderr] | + +
[INFO] [stderr]
[INFO] [stderr] warning: `llama-cpp-sys-2` (lib doc) generated 10 warnings (run `cargo fix --lib -p llama-cpp-sys-2` to apply 10 suggestions)
[INFO] [stderr] Finished `dev` profile [unoptimized + debuginfo] target(s) in 1.80s
[INFO] [stderr] Generated /opt/rustwide/target/x86_64-unknown-linux-gnu/doc/llama_cpp_sys_2/index.html
[INFO] running `Command { std: "docker" "inspect" "14337c64020ef5719a06f261c1e5982323709cc1cb81c7f69d54550629caf499", kill_on_drop: false }`
[INFO] running `Command { std: "docker" "rm" "-f" "14337c64020ef5719a06f261c1e5982323709cc1cb81c7f69d54550629caf499", kill_on_drop: false }`
[INFO] [stdout] 14337c64020ef5719a06f261c1e5982323709cc1cb81c7f69d54550629caf499
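# note on the warnings

The 10 rustdoc warnings above are all instances of the default-on `rustdoc::bare_urls` lint, triggered by doc comments that bindgen copies verbatim from the llama.cpp headers into the generated `bindings.rs`. They are cosmetic: the build finished and the documentation was generated. Rather than editing generated code, a crate in this situation can usually silence the lint at the crate root. The sketch below is a minimal example assuming the common bindgen layout where `lib.rs` includes the generated file from `OUT_DIR`; the actual file layout of llama-cpp-sys-2 is not shown in this log.

```rust
// Hypothetical lib.rs for a bindgen-based -sys crate (layout assumed,
// not taken from llama-cpp-sys-2 itself).

// Silence the `rustdoc::bare_urls` lint for the whole crate so the plain
// URLs in doc comments copied from the C headers no longer warn when
// rustdoc runs (e.g. on docs.rs).
#![allow(rustdoc::bare_urls)]
// Typical allowances for C-style identifiers in generated bindings.
#![allow(non_upper_case_globals, non_camel_case_types, non_snake_case)]

// Pull in the bindings that build.rs generated into OUT_DIR.
include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
```

Alternatively, the upstream comments could be dropped at generation time (for example with bindgen's `generate_comments(false)` builder option), at the cost of losing that documentation in the rendered docs.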