pub struct LlamaContext<'a> {
    pub model: &'a LlamaModel,
    /* private fields */
}
Expand description

Safe wrapper around llama_context.

Fields§

§model: &'a LlamaModel

A reference to the context's model.

Implementations§

source§

impl LlamaContext<'_>

source

pub fn copy_cache(&mut self, src: i32, dest: i32, size: i32)

Copy the cache from one sequence to another.

§Parameters
  • src - The sequence id to copy the cache from.
  • dest - The sequence id to copy the cache to.
  • size - The size of the cache to copy.
source

pub fn copy_kv_cache_seq( &mut self, src: i32, dest: i32, p0: Option<u16>, p1: Option<u16> )

Copy the cache from one sequence to another.

§Parameters
  • src - The sequence id to copy the cache from.
  • dest - The sequence id to copy the cache to.
  • p0 - The start position of the cache to copy. If None, the cache is copied from the beginning up to [p1].
  • p1 - The end position of the cache to copy. If None, the cache is copied from [p0] to the end.
source

pub fn clear_kv_cache_seq(&mut self, src: i32, p0: Option<u16>, p1: Option<u16>)

Clear the kv cache for the given sequence.

§Parameters
  • src - The sequence id to clear the cache for.
  • p0 - The start position of the cache to clear. If None, the entire cache is cleared up to [p1].
  • p1 - The end position of the cache to clear. If None, the entire cache is cleared from [p0].
source

pub fn get_kv_cache_used_cells(&self) -> i32

Returns the number of used KV cells (i.e. have at least one sequence assigned to them)

source

pub fn clear_kv_cache(&mut self)

Clear the KV cache

source

pub fn llama_kv_cache_seq_keep(&mut self, seq_id: i32)

Removes all tokens that do not belong to the specified sequence

§Parameters
  • seq_id - The sequence id to keep
source

pub fn kv_cache_seq_add( &mut self, seq_id: i32, p0: Option<u16>, p1: Option<u16>, delta: i32 )

Adds the relative position “delta” to all tokens that belong to the specified sequence and have positions in [p0, p1). If the KV cache is RoPEd, the KV data is updated accordingly.

§Parameters
  • seq_id - The sequence id to update
  • p0 - The start position of the cache to update. If None, the entire cache is updated up to [p1].
  • p1 - The end position of the cache to update. If None, the entire cache is updated starting from [p0].
  • delta - The relative position to add to the tokens
source

pub fn kv_cache_seq_div( &mut self, seq_id: i32, p0: Option<u16>, p1: Option<u16>, d: NonZeroU8 )

Integer division of the positions by a factor of d > 1. If the KV cache is RoPEd, the KV data is updated accordingly.

§Parameters
  • seq_id - The sequence id to update
  • p0 - The start position of the cache to update. If None, the entire cache is updated up to [p1].
  • p1 - The end position of the cache to update. If None, the entire cache is updated starting from [p0].
  • d - The factor to divide the positions by
source

pub fn kv_cache_seq_pos_max(&self, seq_id: i32) -> i32

Returns the largest position present in the KV cache for the specified sequence

§Parameters
  • seq_id - The sequence id to get the max position for
source

pub fn kv_cache_defrag(&mut self)

Defragment the KV cache. This is applied lazily on the next decode, or explicitly via kv_cache_update.

source

pub fn kv_cache_update(&mut self)

Apply the KV cache updates (such as K-shifts, defragmentation, etc.)

source

pub fn get_kv_cache_token_count(&self) -> i32

Returns the number of tokens in the KV cache (slow, use only for debug). If a KV cell has multiple sequences assigned to it, it will be counted multiple times.

source

pub fn new_kv_cache_view(&self, n_max_seq: i32) -> KVCacheView<'_>

Create an empty KV cache view. (use only for debugging purposes)

§Parameters
  • n_max_seq - Maximum number of sequences that can exist in a cell. It’s not an error if there are more sequences in a cell than this value, however they will not be visible in the view cells_sequences.
source§

impl LlamaContext<'_>

source

pub fn sample(&mut self, sampler: Sampler<'_>) -> LlamaToken

👎Deprecated since 0.1.32: this does not scale well with many params and does not allow for changing of orders.

Sample a token.

§Panics
  • sampler contains no tokens
source

pub fn grammar_accept_token( &mut self, grammar: &mut LlamaGrammar, token: LlamaToken )

Accept a token into the grammar.

source

pub fn sample_grammar( &mut self, llama_token_data_array: &mut LlamaTokenDataArray, llama_grammar: &LlamaGrammar )

Perform grammar sampling.

source

pub fn sample_temp( &self, token_data: &mut LlamaTokenDataArray, temperature: f32 )

Modify [token_data] in place using temperature sampling.

§Panics
  • [temperature] is not between 0.0 and 1.0
source

pub fn sample_token_greedy(&self, token_data: LlamaTokenDataArray) -> LlamaToken

Sample a token greedily.

§Panics
  • [token_data] is empty
source

pub fn sample_tail_free( &self, token_data: &mut LlamaTokenDataArray, z: f32, min_keep: usize )

Tail Free Sampling described in Tail-Free-Sampling.

source

pub fn sample_typical( &self, token_data: &mut LlamaTokenDataArray, p: f32, min_keep: usize )

Locally Typical Sampling implementation described in the paper.

source

pub fn sample_top_p( &self, token_data: &mut LlamaTokenDataArray, p: f32, min_keep: usize )

Nucleus sampling described in academic paper The Curious Case of Neural Text Degeneration

source

pub fn sample_min_p( &self, llama_token_data: &mut LlamaTokenDataArray, p: f32, min_keep: usize )

Minimum P sampling as described in #3841

source

pub fn sample_top_k( &self, token_data: &mut LlamaTokenDataArray, k: i32, min_keep: usize )

Top-K sampling described in academic paper The Curious Case of Neural Text Degeneration

source

pub fn sample_token_softmax(&self, token_data: &mut LlamaTokenDataArray)

Sorts candidate tokens by their logits in descending order and calculates probabilities based on the logits.

source

pub fn sample_repetition_penalty( &mut self, token_data: &mut LlamaTokenDataArray, last_tokens: &[LlamaToken], penalty_last_n: usize, penalty_repeat: f32, penalty_freq: f32, penalty_present: f32 )

source§

impl LlamaContext<'_>

source

pub fn save_session_file( &self, path_session: impl AsRef<Path>, tokens: &[LlamaToken] ) -> Result<(), SaveSessionError>

Save the current session to a file.

§Parameters
  • path_session - The file to save to.
  • tokens - The tokens to associate the session with. This should be a prefix of a sequence of tokens that the context has processed, so that the relevant KV caches are already filled.
§Errors

Fails if the path is not a valid utf8, is not a valid c string, or llama.cpp fails to save the session file.

source

pub fn load_session_file( &mut self, path_session: impl AsRef<Path>, max_tokens: usize ) -> Result<Vec<LlamaToken>, LoadSessionError>

Load a session file into the current context.

You still need to pass the returned tokens to the context for inference to work. What this function buys you is that the KV caches are already filled with the relevant data.

§Parameters
  • path_session - The file to load from. It must be a session file from a compatible context, otherwise the function will error.
  • max_tokens - The maximum token length of the loaded session. If the session was saved with a longer length, the function will error.
§Errors

Fails if the path is not a valid utf8, is not a valid c string, or llama.cpp fails to load the session file. (e.g. the file does not exist, is not a session file, etc.)

source

pub fn get_state_size(&self) -> usize

Returns the maximum size in bytes of the state (rng, logits, embedding and kv_cache) - will often be smaller after compacting tokens

source

pub unsafe fn copy_state_data(&self, dest: *mut u8) -> usize

Copies the state to the specified destination address.

Returns the number of bytes copied

§Safety

Destination needs to have allocated enough memory.

source

pub unsafe fn set_state_data(&mut self, src: &[u8]) -> usize

Set the state, reading from the specified address. Returns the number of bytes read.

§Safety

help wanted: not entirely sure what the safety requirements are here.

source§

impl<'model> LlamaContext<'model>

source

pub fn n_batch(&self) -> u32

Gets the max number of tokens in a batch.

source

pub fn n_ctx(&self) -> u32

Gets the size of the context.

source

pub fn decode(&mut self, batch: &mut LlamaBatch) -> Result<(), DecodeError>

Decodes the batch.

§Errors
  • DecodeError if the decoding failed.
§Panics
  • the returned [c_int] from llama-cpp does not fit into a i32 (this should never happen on most systems)
source

pub fn embeddings_seq_ith(&self, i: i32) -> Result<&[f32], EmbeddingsError>

Get the embeddings for the ith sequence in the current context.

§Returns

A slice containing the embeddings for the last decoded batch. The size corresponds to the n_embd parameter of the context’s model.

§Errors
  • When the current context was constructed without enabling embeddings.
  • If the current model had a pooling type of llama_cpp_sys_2::LLAMA_POOLING_TYPE_NONE
  • If the given sequence index exceeds the max sequence id.
§Panics
  • n_embd does not fit into a usize
source

pub fn embeddings_ith(&self, i: i32) -> Result<&[f32], EmbeddingsError>

Get the embeddings for the ith token in the current context.

§Returns

A slice containing the embeddings for the last decoded batch of the given token. The size corresponds to the n_embd parameter of the context’s model.

§Errors
  • When the current context was constructed without enabling embeddings.
  • When the given token didn’t have logits enabled when it was passed.
  • If the given token index exceeds the max token id.
§Panics
  • n_embd does not fit into a usize
source

pub fn candidates_ith( &self, i: i32 ) -> impl Iterator<Item = LlamaTokenData> + '_

Get the logits for the ith token in the context.

§Panics
  • logit i is not initialized.
source

pub fn get_logits_ith(&self, i: i32) -> &[f32]

Get the logits for the ith token in the context.

§Panics
  • i is greater than n_ctx
  • n_vocab does not fit into a usize
  • logit i is not initialized.
source

pub fn reset_timings(&mut self)

Reset the timings for the context.

source

pub fn timings(&mut self) -> LlamaTimings

Returns the timings for the context.

Trait Implementations§

source§

impl Debug for LlamaContext<'_>

source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
source§

impl Drop for LlamaContext<'_>

source§

fn drop(&mut self)

Executes the destructor for this type. Read more

Auto Trait Implementations§

§

impl<'a> RefUnwindSafe for LlamaContext<'a>

§

impl<'a> !Send for LlamaContext<'a>

§

impl<'a> !Sync for LlamaContext<'a>

§

impl<'a> Unpin for LlamaContext<'a>

§

impl<'a> UnwindSafe for LlamaContext<'a>

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

source§

impl<T> Instrument for T

source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
source§

impl<T> WithSubscriber for T

source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more