// llama_runner/runner/req.rs

use crate::{
    ImageOrText, MessageRole,
    sample::{LlguidanceSamplingParams, SimpleSamplingParams},
    template::ModelChatTemplate,
};
6
/// A generation request handed to a runner.
///
/// Generic over the per-message content type `MsgCt` (e.g. `&str` for
/// text-only models or `ImageOrText` for vision models) and the chat
/// template type `Tmpl`; see the aliases below for the concrete forms.
#[derive(Debug, Clone)]
pub struct GenericRunnerRequest<MsgCt, Tmpl> {
    /// Conversation history as `(role, content)` pairs, in order.
    pub messages: Vec<(MessageRole, MsgCt)>,
    /// Token-sampling configuration for this request.
    pub sampling: SimpleSamplingParams,
    /// Optional llguidance sampling parameters; `None` disables them.
    /// NOTE(review): presumably grammar/constraint-guided decoding — confirm.
    pub llguidance: Option<LlguidanceSamplingParams>,
    /// Upper bound on sequence length; the `Default` impl uses `usize::MAX`
    /// (effectively unlimited).
    pub max_seq: usize,
    /// Optional text pre-seeded before generation.
    /// NOTE(review): assumed to prefill the assistant turn — confirm with the runner.
    pub prefill: Option<String>,
    /// Chat template used to render `messages` into a prompt.
    pub tmpl: Tmpl,
}
16
/// Text-only request with a caller-chosen template type.
pub type GenericTextLmRequest<'a, Tmpl> = GenericRunnerRequest<&'a str, Tmpl>;
/// Image-or-text request with a caller-chosen template type.
pub type GenericVisionLmRequest<'a, Tmpl> = GenericRunnerRequest<ImageOrText<'a>, Tmpl>;

/// Request fixed to [`ModelChatTemplate`].
// NOTE(review): `'a` is unused on the right-hand side here; it appears to
// exist only so `TextLmRequest<'a>` / `VisionLmRequest<'a>` below read
// uniformly. Also `MsgCnt` vs the struct's `MsgCt` — naming drift.
pub type RunnerRequest<'a, MsgCnt> = GenericRunnerRequest<MsgCnt, ModelChatTemplate>;
/// Text-only request with the model's own chat template.
pub type TextLmRequest<'a> = RunnerRequest<'a, &'a str>;
/// Image-or-text request with the model's own chat template.
pub type VisionLmRequest<'a> = RunnerRequest<'a, ImageOrText<'a>>;
23
24impl<M, T> Default for GenericRunnerRequest<M, T>
25where
26 T: Default,
27{
28 fn default() -> Self {
29 Self {
30 messages: vec![],
31 sampling: Default::default(),
32 llguidance: None,
33 max_seq: usize::MAX,
34 prefill: None,
35 tmpl: Default::default(),
36 }
37 }
38}