BaseLanguageModel

Trait BaseLanguageModel 

Source
pub trait BaseLanguageModel: Send + Sync {
    // Required methods
    fn infer<'life0, 'life1, 'life2, 'async_trait>(
        &'life0 self,
        batch_prompts: &'life1 [String],
        kwargs: &'life2 HashMap<String, Value>,
    ) -> Pin<Box<dyn Future<Output = LangExtractResult<Vec<Vec<ScoredOutput>>>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait,
             'life1: 'async_trait,
             'life2: 'async_trait;
    fn model_id(&self) -> &str;
    fn provider_name(&self) -> &str;

    // Provided methods
    fn get_schema_class(&self) -> Option<Box<dyn BaseSchema>> { ... }
    fn apply_schema(&mut self, _schema: Option<Box<dyn BaseSchema>>) { ... }
    fn set_fence_output(&mut self, _fence_output: Option<bool>) { ... }
    fn requires_fence_output(&self) -> bool { ... }
    fn infer_single<'life0, 'life1, 'life2, 'async_trait>(
        &'life0 self,
        prompt: &'life1 str,
        kwargs: &'life2 HashMap<String, Value>,
    ) -> Pin<Box<dyn Future<Output = LangExtractResult<Vec<ScoredOutput>>> + Send + 'async_trait>>
       where Self: 'async_trait,
             'life0: 'async_trait,
             'life1: 'async_trait,
             'life2: 'async_trait { ... }
    fn parse_output(&self, output: &str) -> LangExtractResult<Value> { ... }
    fn format_type(&self) -> FormatType { ... }
    fn supported_models() -> Vec<&'static str>
       where Self: Sized { ... }
    fn supports_model(model_id: &str) -> bool
       where Self: Sized { ... }
}
Expand description

Abstract base trait for language model inference

All language model providers must implement this trait to be compatible with the langextract framework.

Required Methods§

Source

fn infer<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, batch_prompts: &'life1 [String], kwargs: &'life2 HashMap<String, Value>, ) -> Pin<Box<dyn Future<Output = LangExtractResult<Vec<Vec<ScoredOutput>>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Perform inference on a batch of prompts

§Arguments
  • batch_prompts - Batch of input prompts for inference
  • kwargs - Additional inference parameters (temperature, max_tokens, etc.)
§Returns

A vector with one entry per input prompt, where each entry contains the scored outputs produced for that prompt

Source

fn model_id(&self) -> &str

Get the model ID/name

Source

fn provider_name(&self) -> &str

Get the provider name

Provided Methods§

Source

fn get_schema_class(&self) -> Option<Box<dyn BaseSchema>>

Get the schema class this provider supports

Source

fn apply_schema(&mut self, _schema: Option<Box<dyn BaseSchema>>)

Apply a schema instance to this provider

Source

fn set_fence_output(&mut self, _fence_output: Option<bool>)

Set an explicit fence-output preference

Source

fn requires_fence_output(&self) -> bool

Whether this model requires fence output for parsing

Source

fn infer_single<'life0, 'life1, 'life2, 'async_trait>( &'life0 self, prompt: &'life1 str, kwargs: &'life2 HashMap<String, Value>, ) -> Pin<Box<dyn Future<Output = LangExtractResult<Vec<ScoredOutput>>> + Send + 'async_trait>>
where Self: 'async_trait, 'life0: 'async_trait, 'life1: 'async_trait, 'life2: 'async_trait,

Convenience method for single-prompt inference

Source

fn parse_output(&self, output: &str) -> LangExtractResult<Value>

Parse model output as JSON or YAML

This expects raw JSON or YAML without surrounding code fences; extracting content from code fences is handled by the resolver.

Source

fn format_type(&self) -> FormatType

Get the format type this model uses

Source

fn supported_models() -> Vec<&'static str>
where Self: Sized,

Get supported model IDs for this provider

Source

fn supports_model(model_id: &str) -> bool
where Self: Sized,

Check if this provider supports a given model ID

Implementors§