pub struct TokenCounter { /* private fields */ }
Expand description
Token counter for different providers and models
Implementations§
Source§ impl TokenCounter
impl TokenCounter
Source§ pub fn add_model_estimate(&mut self, model: String, tokens_per_char: f64)
pub fn add_model_estimate(&mut self, model: String, tokens_per_char: f64)
Add custom model estimate
Source§ pub fn add_provider_multiplier(&mut self, provider: String, multiplier: f64)
pub fn add_provider_multiplier(&mut self, provider: String, multiplier: f64)
Add provider multiplier
Source§ pub fn estimate_text_tokens(&self, text: &str, model: Option<&str>) -> usize
pub fn estimate_text_tokens(&self, text: &str, model: Option<&str>) -> usize
Estimate tokens for text
Source§ pub fn estimate_message_tokens(
&self,
message: &LLMMessage,
model: Option<&str>,
provider: Option<&str>,
) -> usize
pub fn estimate_message_tokens( &self, message: &LLMMessage, model: Option<&str>, provider: Option<&str>, ) -> usize
Estimate tokens for a message
Source§ pub fn estimate_conversation_tokens(
&self,
messages: &[LLMMessage],
model: Option<&str>,
provider: Option<&str>,
) -> usize
pub fn estimate_conversation_tokens( &self, messages: &[LLMMessage], model: Option<&str>, provider: Option<&str>, ) -> usize
Estimate tokens for a conversation
Source§ pub fn truncate_to_fit(
&self,
messages: Vec<LLMMessage>,
context_window: &ContextWindow,
model: Option<&str>,
provider: Option<&str>,
) -> LLMResult<Vec<LLMMessage>>
pub fn truncate_to_fit( &self, messages: Vec<LLMMessage>, context_window: &ContextWindow, model: Option<&str>, provider: Option<&str>, ) -> LLMResult<Vec<LLMMessage>>
Truncate messages to fit in context window
Source§ pub fn get_context_window(&self, model: &str) -> ContextWindow
pub fn get_context_window(&self, model: &str) -> ContextWindow
Get context window for a model
Source§ pub fn optimize_messages(
&self,
messages: Vec<LLMMessage>,
context_window: &ContextWindow,
model: Option<&str>,
provider: Option<&str>,
) -> LLMResult<Vec<LLMMessage>>
pub fn optimize_messages( &self, messages: Vec<LLMMessage>, context_window: &ContextWindow, model: Option<&str>, provider: Option<&str>, ) -> LLMResult<Vec<LLMMessage>>
Optimize message history for token efficiency
Trait Implementations§
Source§ impl Debug for TokenCounter
impl Debug for TokenCounter
Auto Trait Implementations§
impl Freeze for TokenCounter
impl RefUnwindSafe for TokenCounter
impl Send for TokenCounter
impl Sync for TokenCounter
impl Unpin for TokenCounter
impl UnwindSafe for TokenCounter
Blanket Implementations§
Source§ impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§ fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more