Struct LlamaModel

Source
pub struct LlamaModel { /* private fields */ }
Expand description

A safe wrapper around llama_model.

Implementations§

Source§

impl LlamaModel

Source

pub fn n_ctx_train(&self) -> u32

Get the size of the context window (in tokens) that the model was trained on.

§Panics

If the number of tokens the model was trained on does not fit into an u32. This should be impossible on most platforms due to llama.cpp returning a c_int (i32 on most platforms) which is almost certainly positive.

Source

pub fn tokens( &self, special: Special, ) -> impl Iterator<Item = (LlamaToken, Result<String, TokenToStringError>)> + '_

Get all tokens in the model.

Source

pub fn token_bos(&self) -> LlamaToken

Get the beginning of stream token.

Source

pub fn token_eos(&self) -> LlamaToken

Get the end of stream token.

Source

pub fn token_nl(&self) -> LlamaToken

Get the newline token.

Source

pub fn is_eog_token(&self, token: LlamaToken) -> bool

Check if a token represents the end of generation (end of turn, end of sequence, etc.)

Source

pub fn decode_start_token(&self) -> LlamaToken

Get the decoder start token.

Source

pub fn token_to_str( &self, token: LlamaToken, special: Special, ) -> Result<String, TokenToStringError>

Convert single token to a string.

§Errors

See TokenToStringError for more information.

Source

pub fn token_to_bytes( &self, token: LlamaToken, special: Special, ) -> Result<Vec<u8>, TokenToStringError>

Convert single token to bytes.

§Errors

See TokenToStringError for more information.

§Panics

If a TokenToStringError::InsufficientBufferSpace error returned by Self::token_to_bytes_with_size contains a positive nonzero value. This should never happen.

Source

pub fn tokens_to_str( &self, tokens: &[LlamaToken], special: Special, ) -> Result<String, TokenToStringError>

Convert a vector of tokens to a single string.

§Errors

See TokenToStringError for more information.

Source

pub fn str_to_token( &self, str: &str, add_bos: AddBos, ) -> Result<Vec<LlamaToken>, StringToTokenError>

Convert a string to a Vector of tokens.

§Errors
  • if str contains a null byte.
§Examples
use llama_cpp_2::model::LlamaModel;

use std::path::Path;
use llama_cpp_2::model::AddBos;
let backend = llama_cpp_2::llama_backend::LlamaBackend::init()?;
let model = LlamaModel::load_from_file(&backend, Path::new("path/to/model"), &Default::default())?;
let tokens = model.str_to_token("Hello, World!", AddBos::Always)?;
Source

pub fn token_attr(&self, LlamaToken: LlamaToken) -> LlamaTokenAttrs

Get the type of a token.

§Panics

If the token type is not known to this library.

Source

pub fn token_to_str_with_size( &self, token: LlamaToken, buffer_size: usize, special: Special, ) -> Result<String, TokenToStringError>

Convert a token to a string with a specified buffer size.

Generally you should use LlamaModel::token_to_str as it is able to decode tokens with any length.

§Errors
  • if the token type is unknown
  • the resultant token is larger than buffer_size.
  • the string returned by llama-cpp is not valid utf8.
§Panics
  • if buffer_size does not fit into a c_int.
  • if the returned size from llama-cpp does not fit into a usize. (this should never happen)
Source

pub fn token_to_bytes_with_size( &self, token: LlamaToken, buffer_size: usize, special: Special, lstrip: Option<NonZeroU16>, ) -> Result<Vec<u8>, TokenToStringError>

Convert a token to bytes with a specified buffer size.

Generally you should use LlamaModel::token_to_bytes as it is able to handle tokens of any length.

§Errors
  • if the token type is unknown
  • the resultant token is larger than buffer_size.
§Panics
  • if buffer_size does not fit into a c_int.
  • if the returned size from llama-cpp does not fit into a usize. (this should never happen)
Source

pub fn n_vocab(&self) -> i32

The number of tokens in the model's vocabulary.

This returns llama.cpp's native c_int value (i32 on most platforms) for maximum compatibility.

Source

pub fn vocab_type(&self) -> VocabType

The type of vocab the model was trained on.

§Panics

If llama-cpp emits a vocab type that is not known to this library.

Source

pub fn n_embd(&self) -> c_int

This returns a c_int for maximum compatibility. Most of the time it can be cast to an i32 without issue.

Source

pub fn get_chat_template( &self, buf_size: usize, ) -> Result<String, ChatTemplateError>

Get chat template from model.

§Errors
  • If the model has no chat template
  • If the chat template is not a valid CString.
Source

pub fn load_from_file( _: &LlamaBackend, path: impl AsRef<Path>, params: &LlamaModelParams, ) -> Result<Self, LlamaModelLoadError>

Loads a model from a file.

§Errors

See LlamaModelLoadError for more information.

Source

pub fn lora_adapter_init( &self, path: impl AsRef<Path>, ) -> Result<LlamaLoraAdapter, LlamaLoraAdapterInitError>

Initializes a lora adapter from a file.

§Errors

See LlamaLoraAdapterInitError for more information.

Source

pub fn new_context( &self, _: &LlamaBackend, params: LlamaContextParams, ) -> Result<LlamaContext<'_>, LlamaContextLoadError>

Create a new context from this model.

§Errors

There are many ways this can fail. See LlamaContextLoadError for more information.

Source

pub fn apply_chat_template( &self, tmpl: Option<String>, chat: Vec<LlamaChatMessage>, add_ass: bool, ) -> Result<String, ApplyChatTemplateError>

Apply the model's chat template to some messages. See https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template

tmpl of None means to use the default template provided by llama.cpp for the model

§Errors

There are many ways this can fail. See ApplyChatTemplateError for more information.

Trait Implementations§

Source§

impl Debug for LlamaModel

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Drop for LlamaModel

Source§

fn drop(&mut self)

Executes the destructor for this type. Read more
Source§

impl Send for LlamaModel

Source§

impl Sync for LlamaModel

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more