use token_count::api::consent::ConsentPrompt;
use token_count::cli::{list_models, read_stdin, Cli};
use token_count::error::TokenError;
use token_count::tokenizers::registry::ModelRegistry;
use token_count::{count_tokens, select_formatter};
/// Binary entry point: parse arguments, honor the `--list-models`
/// shortcut, then run the pipeline, reporting failures on stderr and
/// exiting with the error's designated process exit code.
fn main() {
    let cli = Cli::parse_args();

    // `--list-models` is a terminal action: print the catalog and leave.
    if cli.list_models {
        list_models();
        std::process::exit(0);
    }

    if let Err(err) = run(cli) {
        eprintln!("Error: {}", err);
        std::process::exit(err.exit_code());
    }
}
/// Runs the token-counting pipeline, gating network-backed accurate
/// counts behind a user consent prompt.
///
/// Accurate mode for Anthropic-encoded models requires an outbound API
/// call, so unless `--yes` was passed the user is prompted first. A
/// declined prompt degrades gracefully to local estimation instead of
/// failing.
///
/// # Errors
///
/// Propagates model-registry lookup failures and consent-prompt I/O
/// errors as `TokenError`.
fn run(cli: Cli) -> Result<(), TokenError> {
    if cli.accurate && !cli.yes {
        let registry = ModelRegistry::global();
        let model_config = registry.get_model(&cli.model)?;
        // Only Anthropic-backed encodings hit a remote endpoint, so
        // only they need explicit consent.
        if model_config.encoding == "anthropic-claude" {
            let consent = ConsentPrompt {
                provider: "Anthropic",
                api_endpoint: "https://api.anthropic.com/v1/messages/count_tokens",
            };
            // `?` propagates prompt I/O errors; a declined prompt falls
            // back to estimation mode rather than aborting.
            if !consent.ask()? {
                eprintln!(
                    "API call cancelled by user. Falling back to estimation mode (±10% accuracy)."
                );
                eprintln!("To use estimation without this prompt, omit the --accurate flag.");
                return run_tokenization(&cli, false);
            }
        }
    }
    run_tokenization(&cli, cli.accurate)
}
/// Reads the full input from stdin, counts tokens for the configured
/// model (accurate or estimated per `accurate`), and prints the result
/// through the formatter selected by the verbosity flag.
///
/// # Errors
///
/// Propagates stdin read failures and tokenization errors as
/// `TokenError`.
fn run_tokenization(cli: &Cli, accurate: bool) -> Result<(), TokenError> {
    let text = read_stdin()?;
    let counts = count_tokens(&text, &cli.model, accurate, cli.verbose)?;
    println!("{}", select_formatter(cli.verbose).format(&counts));
    Ok(())
}