//! token_mode/token_mode.rs — sample usage of the GLiNER public API in token mode.

use orp::params::RuntimeParameters;
use gliner::util::result::Result;
use gliner::model::{GLiNER, input::text::TextInput, params::Parameters};
use gliner::model::pipeline::token::TokenMode;

6/// Sample usage of the public API in token mode
7fn main() -> Result<()> {    
8    
9    println!("Loading model...");
10    let model = GLiNER::<TokenMode>::new(
11        Parameters::default(),
12        RuntimeParameters::default(),
13        "models/gliner-multitask-large-v0.5/tokenizer.json",
14        "models/gliner-multitask-large-v0.5/onnx/model.onnx",
15    )?;
16    
17    let input = TextInput::from_str(
18        &[ 
19            "I am James Bond",
20            "This is James and I live in Chelsea, London.",
21            "My name is Bond, James Bond.",
22            "I like to drive my Aston Martin.",
23            "The villain in the movie is Auric Goldfinger."
24        ],
25        &[
26            "person", 
27            "location",
28            "vehicle",
29        ]
30    )?;
31
32    println!("Inferencing...");
33    let output = model.inference(input)?;
34
35    println!("Results:");
36    for spans in output.spans {
37        for span in spans {
38            println!("{:3} | {:16} | {:10} | {:.1}%", span.sequence(), span.text(), span.class(), span.probability() * 100.0);
39        }
40    }
41
42    Ok(())
43
44}