llama_cpp_2/model/params.rs
//! A safe wrapper around `llama_model_params`.
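//!
//! A minimal usage sketch of the builder-style setters (the layer count is
//! illustrative, not a recommendation):
//!
//! ```
//! use llama_cpp_2::model::params::LlamaModelParams;
//!
//! let params = LlamaModelParams::default()
//!     .with_n_gpu_layers(32)
//!     .with_use_mlock(false);
//! assert_eq!(params.n_gpu_layers(), 32);
//! ```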
use crate::model::params::kv_overrides::KvOverrides;
use std::ffi::{c_char, CStr};
use std::fmt::{Debug, Formatter};
use std::pin::Pin;
use std::ptr::null;

pub mod kv_overrides;

/// A safe wrapper around `llama_model_params`.
#[allow(clippy::module_name_repetitions)]
pub struct LlamaModelParams {
    pub(crate) params: llama_cpp_sys_2::llama_model_params,
    kv_overrides: Vec<llama_cpp_sys_2::llama_model_kv_override>,
}

impl Debug for LlamaModelParams {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("LlamaModelParams")
            .field("n_gpu_layers", &self.params.n_gpu_layers)
            .field("main_gpu", &self.params.main_gpu)
            .field("vocab_only", &self.params.vocab_only)
            .field("use_mmap", &self.params.use_mmap)
            .field("use_mlock", &self.params.use_mlock)
            .field("kv_overrides", &"vec of kv_overrides")
            .finish()
    }
}

impl LlamaModelParams {
    /// See [`KvOverrides`]
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// let params = Box::pin(LlamaModelParams::default());
    /// let kv_overrides = params.kv_overrides();
    /// let count = kv_overrides.into_iter().count();
    /// assert_eq!(count, 0);
    /// ```
    #[must_use]
    pub fn kv_overrides(&self) -> KvOverrides {
        KvOverrides::new(self)
    }

    /// Appends a key-value override to the model parameters.
    ///
    /// `self` must be pinned, because appending creates a self-referential
    /// struct: `params.kv_overrides` holds a raw pointer into the
    /// `kv_overrides` vector.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::ffi::{CStr, CString};
    /// use std::pin::pin;
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// # use llama_cpp_2::model::params::kv_overrides::ParamOverrideValue;
    /// let mut params = pin!(LlamaModelParams::default());
    /// let key = CString::new("key").expect("CString::new failed");
    /// params.as_mut().append_kv_override(&key, ParamOverrideValue::Int(50));
    ///
    /// let kv_overrides = params.kv_overrides().into_iter().collect::<Vec<_>>();
    /// assert_eq!(kv_overrides.len(), 1);
    ///
    /// let (k, v) = &kv_overrides[0];
    /// assert_eq!(v, &ParamOverrideValue::Int(50));
    /// assert_eq!(k.to_bytes(), b"key", "expected key to be 'key', was {:?}", k);
    /// ```
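    ///
    /// Appending more than once also works; each call keeps the internal list
    /// zero-terminated and refreshes the pointer handed to llama.cpp. A sketch
    /// with arbitrary illustrative keys and values:
    ///
    /// ```rust
    /// # use std::ffi::CString;
    /// # use std::pin::pin;
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// # use llama_cpp_2::model::params::kv_overrides::ParamOverrideValue;
    /// let mut params = pin!(LlamaModelParams::default());
    /// let k1 = CString::new("alpha").expect("CString::new failed");
    /// let k2 = CString::new("beta").expect("CString::new failed");
    /// params.as_mut().append_kv_override(&k1, ParamOverrideValue::Int(1));
    /// params.as_mut().append_kv_override(&k2, ParamOverrideValue::Int(2));
    /// assert_eq!(params.kv_overrides().into_iter().count(), 2);
    /// ```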
    #[allow(clippy::missing_panics_doc)] // panics are just to enforce internal invariants, not user errors
    pub fn append_kv_override(
        mut self: Pin<&mut Self>,
        key: &CStr,
        value: kv_overrides::ParamOverrideValue,
    ) {
        // write into the trailing zeroed entry, not the first entry, so repeated
        // appends do not clobber earlier overrides
        let kv_override = self
            .kv_overrides
            .last_mut()
            .expect("kv_overrides did not have a next allocated");
        assert_eq!(kv_override.key[0], 0, "last kv_override was not empty");
        // There should be some way to do this without iterating over everything.
        // Note: this panics if the key (including its nul terminator) exceeds the
        // 128-byte buffer and, on targets where `c_char` is signed, on any
        // non-ASCII byte (`try_from` fails for values above 0x7F).
        for (i, &c) in key.to_bytes_with_nul().iter().enumerate() {
            kv_override.key[i] = c_char::try_from(c).expect("invalid character in key");
        }
        kv_override.tag = value.tag();
        kv_override.__bindgen_anon_1 = value.value();

        // set to null pointer for panic safety (as push may move the vector, invalidating the pointer)
        self.params.kv_overrides = null();

        // push the next zeroed entry to maintain the invariant that the list ends with a 0 key
        self.kv_overrides
            .push(llama_cpp_sys_2::llama_model_kv_override {
                key: [0; 128],
                tag: 0,
                __bindgen_anon_1: llama_cpp_sys_2::llama_model_kv_override__bindgen_ty_1 {
                    val_i64: 0,
                },
            });

        // point the params at the (possibly reallocated) vector
        self.params.kv_overrides = self.kv_overrides.as_ptr();
    }
}

impl LlamaModelParams {
    /// Get the number of layers to offload to the GPU.
    #[must_use]
    pub fn n_gpu_layers(&self) -> i32 {
        self.params.n_gpu_layers
    }

    /// The GPU used for scratch and small tensors.
    #[must_use]
    pub fn main_gpu(&self) -> i32 {
        self.params.main_gpu
    }

    /// Only load the vocabulary, not the weights.
    #[must_use]
    pub fn vocab_only(&self) -> bool {
        self.params.vocab_only
    }

    /// Use mmap if possible.
    #[must_use]
    pub fn use_mmap(&self) -> bool {
        self.params.use_mmap
    }

    /// Force the system to keep the model in RAM.
    #[must_use]
    pub fn use_mlock(&self) -> bool {
        self.params.use_mlock
    }

    /// Sets the number of layers to offload to the GPU.
    ///
    /// ```
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// let params = LlamaModelParams::default();
    /// let params = params.with_n_gpu_layers(1);
    /// assert_eq!(params.n_gpu_layers(), 1);
    /// ```
    #[must_use]
    pub fn with_n_gpu_layers(mut self, n_gpu_layers: u32) -> Self {
        // The conversion can only fail if the `u32` overflows an `i32`, in which
        // case we saturate to `i32::MAX`.
        let n_gpu_layers = i32::try_from(n_gpu_layers).unwrap_or(i32::MAX);
        self.params.n_gpu_layers = n_gpu_layers;
        self
    }

    /// Sets the main GPU used for scratch and small tensors.
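    ///
    /// A minimal doctest sketch (the device index `1` is arbitrary and only
    /// checks the round-trip through the setter and getter):
    ///
    /// ```
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// let params = LlamaModelParams::default().with_main_gpu(1);
    /// assert_eq!(params.main_gpu(), 1);
    /// ```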
    #[must_use]
    pub fn with_main_gpu(mut self, main_gpu: i32) -> Self {
        self.params.main_gpu = main_gpu;
        self
    }

    /// Sets `vocab_only`: if true, only the vocabulary is loaded, not the weights.
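    ///
    /// A minimal doctest sketch of the round-trip:
    ///
    /// ```
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// let params = LlamaModelParams::default().with_vocab_only(true);
    /// assert_eq!(params.vocab_only(), true);
    /// ```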
    #[must_use]
    pub fn with_vocab_only(mut self, vocab_only: bool) -> Self {
        self.params.vocab_only = vocab_only;
        self
    }

    /// Sets `use_mlock`: if true, forces the system to keep the model in RAM.
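    ///
    /// A minimal doctest sketch of the round-trip:
    ///
    /// ```
    /// # use llama_cpp_2::model::params::LlamaModelParams;
    /// let params = LlamaModelParams::default().with_use_mlock(true);
    /// assert_eq!(params.use_mlock(), true);
    /// ```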
    #[must_use]
    pub fn with_use_mlock(mut self, use_mlock: bool) -> Self {
        self.params.use_mlock = use_mlock;
        self
    }
}

/// Default parameters for `LlamaModel`, as defined in llama.cpp by
/// `llama_model_default_params`.
///
/// ```
/// # use llama_cpp_2::model::params::LlamaModelParams;
/// let params = LlamaModelParams::default();
/// #[cfg(not(target_os = "macos"))]
/// assert_eq!(params.n_gpu_layers(), 0, "n_gpu_layers should be 0");
/// #[cfg(target_os = "macos")]
/// assert_eq!(params.n_gpu_layers(), 999, "n_gpu_layers should be 999");
/// assert_eq!(params.main_gpu(), 0, "main_gpu should be 0");
/// assert_eq!(params.vocab_only(), false, "vocab_only should be false");
/// assert_eq!(params.use_mmap(), true, "use_mmap should be true");
/// assert_eq!(params.use_mlock(), false, "use_mlock should be false");
/// ```
impl Default for LlamaModelParams {
    fn default() -> Self {
        let default_params = unsafe { llama_cpp_sys_2::llama_model_default_params() };
        LlamaModelParams {
            params: default_params,
            // seed with a single zeroed entry so the overrides list always ends
            // with a 0 key, as the C API expects
            kv_overrides: vec![llama_cpp_sys_2::llama_model_kv_override {
                key: [0; 128],
                tag: 0,
                __bindgen_anon_1: llama_cpp_sys_2::llama_model_kv_override__bindgen_ty_1 {
                    val_i64: 0,
                },
            }],
        }
    }
}