// omni_llm_kit/model/model.rs

1use crate::model::errors::LanguageModelCompletionError;
2use crate::model::types::{
3    LanguageModelCompletionEvent, LanguageModelId, LanguageModelName, LanguageModelProviderId,
4    LanguageModelProviderName, LanguageModelToolSchemaFormat,
5};
6use futures_core::stream::BoxStream;
7use crate::CompletionMode;
8use crate::model::LanguageModelRequest;
9
/// Provider-agnostic interface to a streaming language model.
///
/// Implementors identify themselves (id, name, provider), report token
/// limits and capability flags, and expose a single streaming completion
/// entry point.
#[async_trait::async_trait]
pub trait LanguageModel: Send + Sync {
    /// Stable identifier for this model.
    fn id(&self) -> LanguageModelId;
    /// Human-readable model name.
    fn name(&self) -> LanguageModelName;
    /// Identifier of the provider that serves this model.
    fn provider_id(&self) -> LanguageModelProviderId;
    /// Human-readable provider name.
    fn provider_name(&self) -> LanguageModelProviderName;
    /// Maximum number of tokens the model accepts (context-window size).
    fn max_token_count(&self) -> u64;
    /// Maximum number of output tokens, if the model imposes an explicit cap.
    /// Defaults to `None` (no explicit output cap).
    fn max_output_tokens(&self) -> Option<u64> {
        None
    }
    /// Schema format used to describe tool inputs; JSON Schema by default.
    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }
    /// Starts a completion for `request`, returning a stream of events.
    ///
    /// The outer `Result` covers failure to start the request at all; each
    /// streamed item may itself be an error encountered mid-stream.
    async fn stream_completion(
        &self,
        request: LanguageModelRequest,
    ) -> Result<
        BoxStream<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
        LanguageModelCompletionError,
    >;
    /// Whether this model supports tool (function) calling.
    fn supports_tools(&self) -> bool;
    /// Whether this model supports burn mode
    /// (see [`LanguageModel::max_token_count_in_burn_mode`]).
    fn supports_burn_mode(&self) -> bool;
    /// Token limit that applies in burn mode, when it differs from the
    /// normal limit. Defaults to `None`, meaning the normal
    /// [`LanguageModel::max_token_count`] limit applies.
    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        None
    }
}
36
37pub trait LanguageModelExt: LanguageModel {
38    fn max_token_count_for_mode(&self, mode: CompletionMode) -> u64 {
39        match mode {
40            CompletionMode::Normal => self.max_token_count(),
41            CompletionMode::Max => self
42                .max_token_count_in_burn_mode()
43                .unwrap_or_else(|| self.max_token_count()),
44        }
45    }
46}
47
// Blanket impl so `dyn LanguageModel + Send + Sync` trait objects get the
// `LanguageModelExt` helpers without each concrete model opting in.
// NOTE(review): the explicit `+ Send + Sync` looks redundant with the
// supertrait bounds on `LanguageModel`, but `dyn LanguageModel` and
// `dyn LanguageModel + Send + Sync` are distinct types — confirm which
// spelling callers use before simplifying.
impl LanguageModelExt for dyn LanguageModel + Send + Sync {}