use std::{
path::{Path, PathBuf},
sync::Arc,
};
use tokenizers::Tokenizer;
use crate::{
ChatMessage, ContentPart, ImageInput,
chat_template::{
BOS, BOS_TOKEN_ID, EOS_TOKEN_ID, IM_END, IM_START, IM_START_TOKEN_ID, IMAGE_END,
IMAGE_END_TOKEN_ID, IMAGE_START, IMAGE_START_TOKEN_ID, IMAGE_THUMBNAIL,
IMAGE_THUMBNAIL_TOKEN_ID, IMAGE_TOKEN, IMAGE_TOKEN_ID, IMG_ROW_COL_BASE_ID,
},
error::{Error, Result},
generate::{GenerateInputs, generate},
options::{Options, RequestOptions},
preproc::Preprocessor,
runtime::{
decoder::Decoder,
embed_tokens::EmbedTokens,
sampler::{ConstrainedSampler, FreeSampler},
vision::VisionEncoder,
},
};
use llguidance::{Constraint, ParserFactory, api::TopLevelGrammar};
use toktrie::TokEnv;
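/// Multimodal inference engine that owns the image preprocessor, the ONNX
/// vision-encoder / token-embedding / decoder sessions, the tokenizer, an
/// optional llguidance parser factory for grammar-constrained generation, and
/// a monotonically advancing seed used to derive per-request sampling seeds.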
pub struct Engine {
preproc: Preprocessor,
vision: VisionEncoder,
embed: EmbedTokens,
decoder: Decoder,
tokenizer: Tokenizer,
tokenizer_bytes: Vec<u8>,
parser_factory: Option<Arc<ParserFactory>>,
eos_token_id: u32,
next_seed: u64,
}
impl Engine {
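    /// Loads an engine from a model directory, validating
    /// `preprocessor_config.json`, `tokenizer.json`, `chat_template.jinja`, and
    /// `config.json` against the values bundled with this crate before loading
    /// the ONNX graphs from the `onnx/` subdirectory.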
#[cfg(feature = "bundled")]
#[cfg_attr(docsrs, doc(cfg(feature = "bundled")))]
pub fn from_dir<P: AsRef<Path>>(model_dir: P, opts: Options) -> Result<Self> {
let dir: PathBuf = model_dir.as_ref().to_path_buf();
validate_preprocessor_config(&dir.join("preprocessor_config.json"))?;
validate_tokenizer_matches_bundled(&dir.join("tokenizer.json"))?;
validate_chat_template_matches_bundled(&dir.join("chat_template.jinja"))?;
validate_config_context_matches_bundled(&dir.join("config.json"))?;
let onnx = dir.join("onnx");
Self::from_paths(
EnginePaths::new(
onnx.join("vision_encoder.onnx"),
onnx.join("embed_tokens.onnx"),
onnx.join("decoder_model_merged.onnx"),
dir.join("tokenizer.json"),
),
opts,
)
}
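    /// Loads an engine from a directory containing the three ONNX graphs; the
    /// bundled `tokenizer.json` is materialized in a temporary directory and
    /// used in place of a model-provided tokenizer.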
#[cfg(feature = "bundled")]
#[cfg_attr(docsrs, doc(cfg(feature = "bundled")))]
pub fn from_onnx_dir<P: AsRef<Path>>(onnx_dir: P, opts: Options) -> Result<Self> {
let onnx = onnx_dir.as_ref();
let tmp_tokenizer = write_bundled_tokenizer()?;
Self::from_paths(
EnginePaths::new(
onnx.join("vision_encoder.onnx"),
onnx.join("embed_tokens.onnx"),
onnx.join("decoder_model_merged.onnx"),
tmp_tokenizer,
),
opts,
)
}
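    /// Loads an engine from explicit file paths, skipping the strict
    /// config/template drift checks performed by [`Engine::from_dir`]. The
    /// tokenizer is still checked against the crate's expected special-token
    /// ids and the `<|img_row_R_col_C|>` marker layout.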
pub fn from_paths(paths: EnginePaths, opts: Options) -> Result<Self> {
opts.image_budget().validate()?;
let preproc = Preprocessor::new(*opts.image_budget());
let vision = VisionEncoder::from_path(paths.vision(), &opts)?;
let embed = EmbedTokens::from_path(paths.embed(), &opts)?;
let decoder = Decoder::from_path(paths.decoder(), &opts)?;
let tokenizer_path = paths.tokenizer().clone();
let tokenizer_bytes = std::fs::read(&tokenizer_path).map_err(Error::Io)?;
let tokenizer = Tokenizer::from_bytes(&tokenizer_bytes).map_err(Error::tokenizer)?;
let eos_token_id = tokenizer
.token_to_id(IM_END)
.ok_or(Error::InvalidRequest("tokenizer missing <|im_end|> token"))?;
if eos_token_id != EOS_TOKEN_ID {
return Err(Error::InvalidRequest(
"tokenizer <|im_end|> token id differs from expected EOS_TOKEN_ID (7) — wrong tokenizer.json?",
));
}
validate_image_tokenizer_contract(&tokenizer, opts.image_budget().max_tiles())?;
let next_seed = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_nanos() as u64)
.unwrap_or(0xC0_FFEE);
Ok(Self {
preproc,
vision,
embed,
decoder,
tokenizer,
tokenizer_bytes,
parser_factory: None,
eos_token_id,
next_seed,
})
}
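    /// Runs unconstrained (free-sampling) generation over the given chat
    /// messages and images, returning the decoded completion text.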
pub fn generate(
&mut self,
messages: &[ChatMessage],
images: &[ImageInput<'_>],
req: &RequestOptions,
) -> Result<String> {
req.validate()?;
let seed = self.draw_seed();
let mut sampler = FreeSampler::new(*req, seed, self.tokenizer.get_vocab_size(true) as u32);
generate(
&self.preproc,
&mut self.vision,
&mut self.embed,
&mut self.decoder,
&self.tokenizer,
&mut sampler,
GenerateInputs::new(messages, images, req, self.eos_token_id),
)
}
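    /// Runs grammar-constrained generation for an [`llmtask::Task`]: the task's
    /// prompt is wrapped in a single user message (one image part per input
    /// image), decoding is constrained by the task's grammar via llguidance,
    /// and the raw completion text is handed to `task.parse`.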
pub fn run<T: llmtask::Task>(
&mut self,
task: &T,
images: &[ImageInput<'_>],
req: &RequestOptions,
) -> Result<T::Output>
where
Error: From<T::ParseError>,
{
req.validate()?;
if images.len().saturating_add(1) > crate::generate::MAX_TOTAL_CONTENT_PARTS {
return Err(Error::InvalidRequest(
"too many images per request (request-shape DoS guard)",
));
}
        crate::generate::check_image_count_lower_bound(
            images.len(),
            self.preproc
                .budget()
                .min_image_tokens()
                .saturating_add(crate::generate::IMAGE_BLOCK_WRAPPER_TOKENS),
            req.max_new_tokens(),
        )?;
let mut parts: Vec<ContentPart> = Vec::with_capacity(images.len() + 1);
for _ in 0..images.len() {
parts.push(ContentPart::Image);
}
parts.push(ContentPart::Text(task.prompt().to_owned()));
let messages = [ChatMessage::parts(
smol_str::SmolStr::new_static("user"),
parts,
)];
let factory = self.parser_factory()?;
let constraint = build_constraint(&factory, &task.grammar())?;
let seed = self.draw_seed();
let mut sampler = ConstrainedSampler::new(
constraint,
*req,
seed,
self.tokenizer.get_vocab_size(true) as u32,
);
let text = generate(
&self.preproc,
&mut self.vision,
&mut self.embed,
&mut self.decoder,
&self.tokenizer,
&mut sampler,
GenerateInputs::new(&messages, images, req, self.eos_token_id),
)?;
task.parse(&text).map_err(Error::from)
}
fn draw_seed(&mut self) -> u64 {
let seed = self.next_seed;
self.next_seed = self.next_seed.wrapping_add(1);
seed
}
fn parser_factory(&mut self) -> Result<Arc<ParserFactory>> {
if let Some(f) = &self.parser_factory {
return Ok(f.clone());
}
let factory = build_parser_factory(&self.tokenizer_bytes)?;
let arc = Arc::new(factory);
self.parser_factory = Some(arc.clone());
Ok(arc)
}
}
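/// Checks that the tokenizer assigns the expected ids to the required special
/// tokens and to every `<|img_row_R_col_C|>` marker reachable under
/// `max_tiles`, so prompt construction stays in sync with the bundled model's
/// vision-token contract.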
fn validate_image_tokenizer_contract(tokenizer: &Tokenizer, max_tiles: usize) -> Result<()> {
let id_check = |name_str: &str, expected: u32| -> Result<()> {
let actual = tokenizer
.token_to_id(name_str)
.ok_or(Error::InvalidRequest(
"tokenizer missing required special token — wrong tokenizer.json?",
))?;
if actual != expected {
return Err(Error::InvalidRequest(
"tokenizer special-token id differs from expected — wrong tokenizer.json?",
));
}
Ok(())
};
id_check(BOS, BOS_TOKEN_ID)?;
id_check(IM_START, IM_START_TOKEN_ID)?;
id_check(IMAGE_TOKEN, IMAGE_TOKEN_ID)?;
id_check(IMAGE_START, IMAGE_START_TOKEN_ID)?;
id_check(IMAGE_END, IMAGE_END_TOKEN_ID)?;
id_check(IMAGE_THUMBNAIL, IMAGE_THUMBNAIL_TOKEN_ID)?;
if max_tiles > crate::options::MAX_TOKENIZER_TILE_DIM {
return Err(Error::InvalidBudget(
"max_tiles must be <= 10 (bundled tokenizer's row/col marker grid is 10x10)",
));
}
for r in 1..=max_tiles as u32 {
for c in 1..=max_tiles as u32 {
let marker = format!("<|img_row_{r}_col_{c}|>");
let actual = tokenizer
.token_to_id(&marker)
.ok_or(Error::InvalidRequest(
"tokenizer missing one or more <|img_row_R_col_C|> markers reachable under max_tiles — wrong tokenizer.json?",
))?;
let expected = IMG_ROW_COL_BASE_ID + (r - 1) * 10 + (c - 1);
if actual != expected {
return Err(Error::InvalidRequest(
"tokenizer <|img_row_R_col_C|> id differs from expected (IMG_ROW_COL_BASE_ID + (R-1)*10 + (C-1)) — wrong tokenizer.json?",
));
}
}
}
Ok(())
}
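/// Verifies that `preprocessor_config.json` matches the image-preprocessing
/// constants hardcoded in this crate: patch size, downsample factor, tile size,
/// the resize/rescale/normalize/pad/splitting flags, data format, resample
/// mode, rescale factor, tile dimensions, and normalization mean/std.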
#[cfg_attr(not(feature = "bundled"), allow(dead_code))]
fn validate_preprocessor_config(path: &Path) -> Result<()> {
if !path.exists() {
return Err(Error::InvalidRequest(
"model directory missing preprocessor_config.json — use from_paths to bypass strict drift checks",
));
}
let raw = std::fs::read_to_string(path).map_err(Error::Io)?;
let cfg: serde_json::Value = serde_json::from_str(&raw)
.map_err(|e| Error::tokenizer(format!("preprocessor_config.json parse failure: {e}")))?;
let read_u64 = |key: &'static str| -> Result<u64> {
cfg
.get(key)
.and_then(|v| v.as_u64())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing required integer field — wrong model revision?",
))
};
let read_bool = |key: &'static str| -> Result<bool> {
cfg
.get(key)
.and_then(|v| v.as_bool())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing required boolean field — wrong model revision?",
))
};
let read_str = |key: &'static str| -> Result<&str> {
cfg
.get(key)
.and_then(|v| v.as_str())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing required string field — wrong model revision?",
))
};
let read_f64 = |key: &'static str| -> Result<f64> {
cfg
.get(key)
.and_then(|v| v.as_f64())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing required number field — wrong model revision?",
))
};
let read_f32_array3 = |key: &'static str| -> Result<[f32; 3]> {
let arr = cfg
.get(key)
.and_then(|v| v.as_array())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing required [f32; 3] field — wrong model revision?",
))?;
if arr.len() != 3 {
return Err(Error::InvalidRequest(
"preprocessor_config.json field is not a 3-element array — wrong model revision?",
));
}
let mut out = [0f32; 3];
for (i, v) in arr.iter().enumerate() {
out[i] = v.as_f64().ok_or(Error::InvalidRequest(
"preprocessor_config.json array element is not a number — wrong model revision?",
))? as f32;
}
Ok(out)
};
if read_u64("encoder_patch_size")? != crate::preproc::tile_grid::PATCH_SIZE as u64 {
return Err(Error::InvalidRequest(
"preprocessor_config.json encoder_patch_size != 16 (lfm crate hardcoded) — wrong model revision?",
));
}
if read_u64("downsample_factor")? != crate::preproc::tile_grid::DOWNSAMPLE_FACTOR as u64 {
return Err(Error::InvalidRequest(
"preprocessor_config.json downsample_factor != 2 (lfm crate hardcoded) — wrong model revision?",
));
}
if read_u64("tile_size")? != crate::preproc::tile_grid::FULL_TILE_SIZE as u64 {
return Err(Error::InvalidRequest(
"preprocessor_config.json tile_size != 512 (lfm crate hardcoded) — wrong model revision?",
));
}
for (key, expected) in [
("do_resize", true),
("do_rescale", true),
("do_normalize", true),
("do_pad", true),
("do_image_splitting", true),
] {
if read_bool(key)? != expected {
return Err(Error::InvalidRequest(
"preprocessor_config.json boolean preprocessing flag differs from lfm crate hardcoded value — wrong model revision?",
));
}
}
if read_str("data_format")? != "channels_first" {
return Err(Error::InvalidRequest(
"preprocessor_config.json data_format != channels_first — wrong model revision?",
));
}
if read_u64("resample")? != 2 {
return Err(Error::InvalidRequest(
"preprocessor_config.json resample != 2 (BILINEAR) — wrong model revision?",
));
}
let rf = read_f64("rescale_factor")?;
if (rf - (1.0 / 255.0)).abs() > 1e-9 {
return Err(Error::InvalidRequest(
"preprocessor_config.json rescale_factor != 1/255 — wrong model revision?",
));
}
let size = cfg
.get("size")
.and_then(|v| v.as_object())
.ok_or(Error::InvalidRequest(
"preprocessor_config.json missing size object — wrong model revision?",
))?;
for (key, expected) in [("height", 512u64), ("width", 512u64)] {
if size.get(key).and_then(|v| v.as_u64()) != Some(expected) {
return Err(Error::InvalidRequest(
"preprocessor_config.json size.{height,width} != 512 — wrong model revision?",
));
}
}
for (key, expected) in [("image_mean", [0.5f32; 3]), ("image_std", [0.5f32; 3])] {
let got = read_f32_array3(key)?;
for (g, e) in got.iter().zip(expected.iter()) {
if (g - e).abs() > 1e-4 {
return Err(Error::InvalidRequest(
"preprocessor_config.json image_mean/image_std differs from [0.5, 0.5, 0.5] (lfm crate hardcoded normalization) — wrong model revision?",
));
}
}
}
Ok(())
}
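/// Verifies that `config.json` reports the same `max_position_embeddings` as
/// the crate's `MODEL_CONTEXT_TOKENS`, so admission gates cannot accept
/// requests past the loaded model's real position limit.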
#[cfg(feature = "bundled")]
fn validate_config_context_matches_bundled(path: &Path) -> Result<()> {
if !path.exists() {
return Err(Error::InvalidRequest(
"model directory missing config.json — use from_paths to bypass strict context-length drift checks (advanced: requires matching ONNX embedding table)",
));
}
let supplied = std::fs::read(path).map_err(Error::Io)?;
let v: serde_json::Value = serde_json::from_slice(&supplied)
.map_err(|_| Error::InvalidRequest("config.json is not valid JSON"))?;
let max_pos = v
.get("text_config")
.and_then(|tc| tc.get("max_position_embeddings"))
.or_else(|| v.get("max_position_embeddings"))
.and_then(|n| n.as_u64())
.ok_or(Error::InvalidRequest(
"config.json missing text_config.max_position_embeddings (or top-level max_position_embeddings)",
))?;
if max_pos != crate::options::MODEL_CONTEXT_TOKENS as u64 {
return Err(Error::InvalidRequest(
"config.json max_position_embeddings differs from crate's MODEL_CONTEXT_TOKENS (128_000) — admission gates would accept requests past the loaded model's real position limit",
));
}
Ok(())
}
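/// Verifies that the model directory's `chat_template.jinja` is byte-identical
/// to the bundled template the engine actually renders with.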
#[cfg(feature = "bundled")]
fn validate_chat_template_matches_bundled(path: &Path) -> Result<()> {
if !path.exists() {
return Err(Error::InvalidRequest(
"model directory missing chat_template.jinja — use from_paths to bypass strict prompt-template drift checks (advanced: requires matching ONNX embedding table)",
));
}
let supplied = std::fs::read(path).map_err(Error::Io)?;
if supplied != crate::bundled::CHAT_TEMPLATE_JINJA {
return Err(Error::InvalidRequest(
"supplied chat_template.jinja bytes do not match the bundled chat template — engine renders with bundled template; mismatched model template would produce semantically wrong prompts even when <image> counts line up",
));
}
Ok(())
}
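/// Verifies that the supplied `tokenizer.json` is byte-identical to the
/// tokenizer bundled with this crate.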
#[cfg(feature = "bundled")]
fn validate_tokenizer_matches_bundled(path: &Path) -> Result<()> {
let supplied = std::fs::read(path).map_err(Error::Io)?;
if supplied != crate::bundled::TOKENIZER_JSON {
return Err(Error::InvalidRequest(
"supplied tokenizer.json bytes do not match the bundled tokenizer — use Engine::from_paths to bypass strict tokenizer-identity check (advanced: requires matching ONNX embedding table)",
));
}
Ok(())
}
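/// Writes the bundled `tokenizer.json` into a content-addressed directory under
/// the system temp dir (atomically, via a per-process/per-thread temp file plus
/// rename) and caches the resulting path behind a process-wide mutex.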
#[cfg(feature = "bundled")]
fn write_bundled_tokenizer() -> Result<PathBuf> {
use std::sync::Mutex;
static CACHE: Mutex<Option<PathBuf>> = Mutex::new(None);
let mut guard = CACHE
.lock()
.expect("write_bundled_tokenizer mutex poisoned");
if let Some(p) = guard.as_ref() {
match std::fs::read(p) {
Ok(existing) if existing == crate::bundled::TOKENIZER_JSON => return Ok(p.clone()),
_ => {
*guard = None;
}
}
}
let hash = simple_hash_hex(crate::bundled::TOKENIZER_JSON);
let dir = std::env::temp_dir().join(format!("lfm-bundled-{hash}"));
std::fs::create_dir_all(&dir).map_err(Error::Io)?;
let path = dir.join("tokenizer.json");
let needs_write = match std::fs::read(&path) {
Ok(existing) if existing == crate::bundled::TOKENIZER_JSON => false,
Ok(_) => true,
Err(_) => true,
};
if needs_write {
let tid = std::thread::current().id();
let tmp = dir.join(format!(
"tokenizer.json.{}.{:?}.tmp",
std::process::id(),
tid
));
std::fs::write(&tmp, crate::bundled::TOKENIZER_JSON).map_err(Error::Io)?;
        if let Err(rename_err) = std::fs::rename(&tmp, &path) {
            let _ = std::fs::remove_file(&tmp);
            // Another writer may have won the rename race; accept the existing
            // file if it already holds the bundled tokenizer bytes, otherwise
            // surface the rename error.
            match std::fs::read(&path) {
                Ok(existing) if existing == crate::bundled::TOKENIZER_JSON => {}
                _ => return Err(Error::Io(rename_err)),
            }
        }
}
*guard = Some(path.clone());
Ok(path)
}
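/// 64-bit FNV-1a hash rendered as lowercase hex; used only to derive a stable
/// temp-directory name for the bundled tokenizer, not for security.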
#[cfg(feature = "bundled")]
fn simple_hash_hex(bytes: &[u8]) -> String {
let mut h: u64 = 0xcbf29ce484222325;
for &b in bytes {
h ^= b as u64;
h = h.wrapping_mul(0x100000001b3);
}
format!("{h:016x}")
}
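/// Locations of the three ONNX graphs (vision encoder, token embedding,
/// decoder) and the `tokenizer.json` used to construct an [`Engine`].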
pub struct EnginePaths {
vision: PathBuf,
embed: PathBuf,
decoder: PathBuf,
tokenizer: PathBuf,
}
impl EnginePaths {
pub fn new(vision: PathBuf, embed: PathBuf, decoder: PathBuf, tokenizer: PathBuf) -> Self {
Self {
vision,
embed,
decoder,
tokenizer,
}
}
pub fn vision(&self) -> &PathBuf {
&self.vision
}
pub fn embed(&self) -> &PathBuf {
&self.embed
}
pub fn decoder(&self) -> &PathBuf {
&self.decoder
}
pub fn tokenizer(&self) -> &PathBuf {
&self.tokenizer
}
pub fn set_vision(&mut self, vision: PathBuf) {
self.vision = vision;
}
pub fn set_embed(&mut self, embed: PathBuf) {
self.embed = embed;
}
pub fn set_decoder(&mut self, decoder: PathBuf) {
self.decoder = decoder;
}
pub fn set_tokenizer(&mut self, tokenizer: PathBuf) {
self.tokenizer = tokenizer;
}
pub fn with_vision(mut self, vision: PathBuf) -> Self {
self.vision = vision;
self
}
pub fn with_embed(mut self, embed: PathBuf) -> Self {
self.embed = embed;
self
}
pub fn with_decoder(mut self, decoder: PathBuf) -> Self {
self.decoder = decoder;
self
}
pub fn with_tokenizer(mut self, tokenizer: PathBuf) -> Self {
self.tokenizer = tokenizer;
self
}
}
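/// Builds an llguidance [`ParserFactory`] over a toktrie token environment
/// derived from the raw `tokenizer.json` bytes.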
fn build_parser_factory(tokenizer_bytes: &[u8]) -> Result<ParserFactory> {
let byte_tok = toktrie_hf_tokenizers::ByteTokenizer::from_json_bytes(tokenizer_bytes)
.map_err(Error::llguidance)?;
let tok_env: TokEnv = byte_tok.into_tok_env(None).map_err(Error::llguidance)?;
ParserFactory::new_simple(&tok_env).map_err(Error::llguidance)
}
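/// Translates an [`llmtask::Grammar`] into an llguidance [`TopLevelGrammar`]
/// and wraps a freshly created parser in a [`Constraint`]; unsupported grammar
/// variants are rejected with `InvalidRequest`.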
fn build_constraint(factory: &ParserFactory, grammar: &llmtask::Grammar) -> Result<Constraint> {
let top = match grammar {
llmtask::Grammar::JsonSchema(schema) => TopLevelGrammar::from_json_schema(schema.clone()),
llmtask::Grammar::Lark(src) => TopLevelGrammar::from_lark(src.to_string()),
llmtask::Grammar::Regex(rg) => TopLevelGrammar::from_regex(rg.pattern()),
_ => {
return Err(Error::InvalidRequest(
"llmtask::Grammar variant unsupported by lfm — please open an issue (lfm uses llguidance and can extend support)",
));
}
};
let parser = factory.create_parser(top).map_err(Error::llguidance)?;
Ok(Constraint::new(parser))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn engine_paths_accessors() {
let ep = EnginePaths::new(
PathBuf::from("v.onnx"),
PathBuf::from("e.onnx"),
PathBuf::from("d.onnx"),
PathBuf::from("t.json"),
);
assert_eq!(ep.vision(), &PathBuf::from("v.onnx"));
assert_eq!(ep.tokenizer(), &PathBuf::from("t.json"));
}
    #[test]
    #[cfg(feature = "bundled")]
    fn validate_image_tokenizer_contract_caps_max_tiles() {
let path = write_bundled_tokenizer().expect("write bundled tokenizer");
let tokenizer = Tokenizer::from_file(&path).expect("load tokenizer");
let r =
validate_image_tokenizer_contract(&tokenizer, crate::options::MAX_TOKENIZER_TILE_DIM + 1);
assert!(
matches!(r, Err(Error::InvalidBudget(_))),
"must reject max_tiles above the cap, got {r:?}"
);
assert!(
validate_image_tokenizer_contract(&tokenizer, crate::options::MAX_TOKENIZER_TILE_DIM)
.is_ok()
);
    }
#[test]
#[cfg(feature = "bundled")]
fn validate_tokenizer_matches_bundled_rejects_drift() {
let dir = std::env::temp_dir().join(format!("lfm-test-drift-{}", std::process::id()));
std::fs::create_dir_all(&dir).unwrap();
let drift_path = dir.join("tokenizer-drift.json");
let mut bytes = crate::bundled::TOKENIZER_JSON.to_vec();
let last = bytes.len() - 1;
bytes[last] = bytes[last].wrapping_add(1);
std::fs::write(&drift_path, &bytes).unwrap();
let result = validate_tokenizer_matches_bundled(&drift_path);
assert!(
matches!(result, Err(Error::InvalidRequest(_))),
"drifted tokenizer must be rejected, got {result:?}"
);
let ok_path = dir.join("tokenizer-ok.json");
std::fs::write(&ok_path, crate::bundled::TOKENIZER_JSON).unwrap();
assert!(validate_tokenizer_matches_bundled(&ok_path).is_ok());
}
#[test]
#[cfg(feature = "bundled")]
fn validate_config_context_matches_bundled_accepts_correct_and_rejects_drift() {
let dir = std::env::temp_dir().join(format!("lfm-test-config-drift-{}", std::process::id()));
std::fs::create_dir_all(&dir).unwrap();
let missing = dir.join("config-missing.json");
let _ = std::fs::remove_file(&missing);
assert!(matches!(
validate_config_context_matches_bundled(&missing),
Err(Error::InvalidRequest(_))
));
let drift = dir.join("config-drift.json");
std::fs::write(
&drift,
r#"{"text_config":{"max_position_embeddings":4096}}"#,
)
.unwrap();
assert!(matches!(
validate_config_context_matches_bundled(&drift),
Err(Error::InvalidRequest(_))
));
let ok_nested = dir.join("config-ok-nested.json");
std::fs::write(
&ok_nested,
r#"{"text_config":{"max_position_embeddings":128000}}"#,
)
.unwrap();
assert!(validate_config_context_matches_bundled(&ok_nested).is_ok());
let ok_flat = dir.join("config-ok-flat.json");
std::fs::write(&ok_flat, r#"{"max_position_embeddings":128000}"#).unwrap();
assert!(validate_config_context_matches_bundled(&ok_flat).is_ok());
let ok_bundled = dir.join("config-ok-bundled.json");
std::fs::write(&ok_bundled, crate::bundled::CONFIG_JSON).unwrap();
assert!(validate_config_context_matches_bundled(&ok_bundled).is_ok());
let bad_json = dir.join("config-bad.json");
std::fs::write(&bad_json, b"{not json").unwrap();
assert!(matches!(
validate_config_context_matches_bundled(&bad_json),
Err(Error::InvalidRequest(_))
));
}
#[test]
#[cfg(feature = "bundled")]
fn validate_chat_template_matches_bundled_rejects_drift_and_missing() {
let dir = std::env::temp_dir().join(format!("lfm-test-tmpl-drift-{}", std::process::id()));
std::fs::create_dir_all(&dir).unwrap();
let missing = dir.join("chat_template-missing.jinja");
let _ = std::fs::remove_file(&missing);
assert!(matches!(
validate_chat_template_matches_bundled(&missing),
Err(Error::InvalidRequest(_))
));
let drift = dir.join("chat_template-drift.jinja");
let mut bytes = crate::bundled::CHAT_TEMPLATE_JINJA.to_vec();
let last = bytes.len() - 1;
bytes[last] = bytes[last].wrapping_add(1);
std::fs::write(&drift, &bytes).unwrap();
assert!(matches!(
validate_chat_template_matches_bundled(&drift),
Err(Error::InvalidRequest(_))
));
let ok = dir.join("chat_template-ok.jinja");
std::fs::write(&ok, crate::bundled::CHAT_TEMPLATE_JINJA).unwrap();
assert!(validate_chat_template_matches_bundled(&ok).is_ok());
}
}