pub trait BaseChatModel: BaseLanguageModel {
Show 31 methods
// Required methods
fn chat_config(&self) -> &ChatModelConfig;
fn _generate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait;
// Provided methods
fn profile(&self) -> Option<&ModelProfile> { ... }
fn _agenerate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait { ... }
fn _stream(
&self,
_messages: Vec<BaseMessage>,
_stop: Option<Vec<String>>,
_run_manager: Option<&CallbackManagerForLLMRun>,
) -> Result<ChatGenerationStream> { ... }
fn _astream<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait { ... }
fn get_first_message(&self, result: &ChatResult) -> Result<AIMessage> { ... }
fn _combine_llm_outputs(
&self,
_llm_outputs: &[Option<HashMap<String, Value>>],
) -> HashMap<String, Value> { ... }
fn _convert_cached_generations(
&self,
cache_val: Vec<Generation>,
) -> Vec<ChatGeneration> { ... }
fn _get_invocation_params(
&self,
stop: Option<&[String]>,
kwargs: Option<&HashMap<String, Value>>,
) -> HashMap<String, Value> { ... }
fn _get_llm_string(
&self,
stop: Option<&[String]>,
kwargs: Option<&HashMap<String, Value>>,
) -> String { ... }
fn has_stream_impl(&self) -> bool { ... }
fn has_astream_impl(&self) -> bool { ... }
fn has_streaming_field(&self) -> Option<bool> { ... }
fn _should_stream(
&self,
async_api: bool,
has_tools: bool,
stream_kwarg: Option<bool>,
run_manager: Option<&[Arc<dyn BaseCallbackHandler>]>,
) -> bool { ... }
fn generate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn agenerate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn _call_async<'life0, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<BaseMessage>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn generate_with_tools<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
_tools: &'life1 [ToolDefinition],
_tool_choice: Option<&'life2 ToolChoice>,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait { ... }
fn convert_input(
&self,
input: LanguageModelInput,
) -> Result<Vec<BaseMessage>> { ... }
fn invoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn ainvoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn bind_tools(
&self,
_tools: &[Arc<dyn BaseTool>],
_tool_choice: Option<ToolChoice>,
) -> Result<()> { ... }
fn get_tool_definitions(
&self,
tools: &[Arc<dyn BaseTool>],
) -> Vec<ToolDefinition> { ... }
fn stream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn astream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait { ... }
fn stream_generations<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>
where Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait { ... }
fn get_chat_ls_params(&self, stop: Option<&[String]>) -> LangSmithParams { ... }
fn to_dict(&self) -> HashMap<String, Value> { ... }
fn with_structured_output(
&self,
_schema: Value,
_include_raw: bool,
) -> Result<()> { ... }
fn get_identifying_params(&self) -> HashMap<String, Value> { ... }
}

Expand description
Base trait for all chat models.
This trait follows the LangChain pattern where each provider implements the core generation methods. The trait provides both sync-style (via async) and streaming interfaces.
§Implementation Guide
Custom chat model implementations should override these methods:
| Method/Property | Description | Required |
|---|---|---|
| _generate | Use to generate a chat result from messages | Required |
| _llm_type (property) | Used to uniquely identify the type of the model | Required |
| _identifying_params | Represent model parameterization for tracing | Optional |
| _stream | Use to implement streaming | Optional |
| _agenerate | Use to implement a native async method | Optional |
| _astream | Use to implement async version of _stream | Optional |
Required Methods§
fn chat_config(&self) -> &ChatModelConfig
fn chat_config(&self) -> &ChatModelConfig
Get the chat model configuration.
fn _generate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn _generate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>where
Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Core abstract method to generate a chat result.
Implementations must override this method.
§Arguments
- messages - The messages to generate from.
- stop - Optional list of stop words to use when generating.
- run_manager - Optional callback manager to use for this call.
§Returns
The output chat result containing generations.
Provided Methods§
fn profile(&self) -> Option<&ModelProfile>
fn profile(&self) -> Option<&ModelProfile>
Get the model profile, if available.
fn _agenerate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn _agenerate<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Async version of _generate.
Default implementation calls _generate.
fn _stream(
&self,
_messages: Vec<BaseMessage>,
_stop: Option<Vec<String>>,
_run_manager: Option<&CallbackManagerForLLMRun>,
) -> Result<ChatGenerationStream>
fn _stream( &self, _messages: Vec<BaseMessage>, _stop: Option<Vec<String>>, _run_manager: Option<&CallbackManagerForLLMRun>, ) -> Result<ChatGenerationStream>
fn _astream<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn _astream<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
_run_manager: Option<&'life1 AsyncCallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Async stream the output of the model.
Default implementation calls _stream.
fn get_first_message(&self, result: &ChatResult) -> Result<AIMessage>
fn get_first_message(&self, result: &ChatResult) -> Result<AIMessage>
Get the first AI message from a chat result.
Helper method to extract the first generation’s message as an AIMessage.
fn _combine_llm_outputs(
&self,
_llm_outputs: &[Option<HashMap<String, Value>>],
) -> HashMap<String, Value>
fn _combine_llm_outputs( &self, _llm_outputs: &[Option<HashMap<String, Value>>], ) -> HashMap<String, Value>
Combine LLM outputs from multiple results.
This method is called after generating results from multiple prompts to combine any LLM-specific output information.
Default implementation returns an empty HashMap. Subclasses can override to combine provider-specific output data.
fn _convert_cached_generations(
&self,
cache_val: Vec<Generation>,
) -> Vec<ChatGeneration>
fn _convert_cached_generations( &self, cache_val: Vec<Generation>, ) -> Vec<ChatGeneration>
Convert cached Generation objects to ChatGeneration objects.
Handle case where cache contains Generation objects instead of ChatGeneration objects. This can happen due to serialization/deserialization issues or legacy cache data.
fn _get_invocation_params(
&self,
stop: Option<&[String]>,
kwargs: Option<&HashMap<String, Value>>,
) -> HashMap<String, Value>
fn _get_invocation_params( &self, stop: Option<&[String]>, kwargs: Option<&HashMap<String, Value>>, ) -> HashMap<String, Value>
Get invocation parameters for tracing.
Returns a HashMap containing the model configuration and stop sequences.
fn _get_llm_string(
&self,
stop: Option<&[String]>,
kwargs: Option<&HashMap<String, Value>>,
) -> String
fn _get_llm_string( &self, stop: Option<&[String]>, kwargs: Option<&HashMap<String, Value>>, ) -> String
Get the LLM string for cache key generation.
This string uniquely identifies the model configuration for caching purposes.
fn has_stream_impl(&self) -> bool
fn has_stream_impl(&self) -> bool
Check if _stream is implemented (not the default).
This is used by _should_stream to determine if streaming is available.
Implementations that override _stream should also override this to return true.
fn has_astream_impl(&self) -> bool
fn has_astream_impl(&self) -> bool
Check if _astream is implemented (not the default).
This is used by _should_stream to determine if async streaming is available.
Implementations that override _astream should also override this to return true.
fn has_streaming_field(&self) -> Option<bool>
fn has_streaming_field(&self) -> Option<bool>
Check if streaming is enabled via a model field.
Override this if the model has a streaming field that should be checked.
fn _should_stream(
&self,
async_api: bool,
has_tools: bool,
stream_kwarg: Option<bool>,
run_manager: Option<&[Arc<dyn BaseCallbackHandler>]>,
) -> bool
fn _should_stream( &self, async_api: bool, has_tools: bool, stream_kwarg: Option<bool>, run_manager: Option<&[Arc<dyn BaseCallbackHandler>]>, ) -> bool
Determine if a given model call should hit the streaming API.
This method mirrors Python’s _should_stream behavior:
- Check if streaming is implemented (either sync or async)
- Check if streaming has been disabled on this instance
- Check if streaming is disabled for tool calling and tools are present
- Check if streaming field is set on the model
- Check if any streaming callback handlers are present
§Arguments
- async_api - Whether this is an async API call.
- has_tools - Whether tools are present in the call.
- stream_kwarg - Optional explicit stream kwarg from the caller.
- run_manager - Optional callback manager for checking streaming handlers.
§Returns
true if streaming should be used, false otherwise.
fn generate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn generate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Generate from a batch of message lists.
This method should make use of batched calls for models that expose a batched API.
Use this method when you want to:
- Take advantage of batched calls
- Need more output from the model than just the top generated value
- Are building chains that are agnostic to the underlying language model type
§Arguments
- messages - List of message lists.
- stop - Stop words to use when generating.
- callbacks - Callbacks to pass through.
§Returns
An LLMResult containing a list of candidate ChatGeneration objects.
fn agenerate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn agenerate<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Vec<BaseMessage>>,
stop: Option<Vec<String>>,
_callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<LLMResult>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Async version of generate.
fn _call_async<'life0, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<BaseMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn _call_async<'life0, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
callbacks: Option<Callbacks>,
) -> Pin<Box<dyn Future<Output = Result<BaseMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Async call helper.
This is a convenience method that wraps agenerate for single-message calls.
fn generate_with_tools<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
_tools: &'life1 [ToolDefinition],
_tool_choice: Option<&'life2 ToolChoice>,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
fn generate_with_tools<'life0, 'life1, 'life2, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
_tools: &'life1 [ToolDefinition],
_tool_choice: Option<&'life2 ToolChoice>,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
'life2: 'async_trait,
Generate a response from the model with tools.
This is the preferred method when tool calling is needed.
Default implementation ignores tools and calls _generate.
§Arguments
- messages - The conversation history.
- tools - Tool definitions for the model to use.
- tool_choice - Optional configuration for tool selection.
- stop - Optional stop sequences.
§Returns
An AIMessage containing the generated response.
fn convert_input(&self, input: LanguageModelInput) -> Result<Vec<BaseMessage>>
fn convert_input(&self, input: LanguageModelInput) -> Result<Vec<BaseMessage>>
Convert input to messages.
fn invoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn invoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Invoke the model with input.
fn ainvoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn ainvoke<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
) -> Pin<Box<dyn Future<Output = Result<AIMessage>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Async invoke the model.
fn bind_tools(
&self,
_tools: &[Arc<dyn BaseTool>],
_tool_choice: Option<ToolChoice>,
) -> Result<()>
fn bind_tools( &self, _tools: &[Arc<dyn BaseTool>], _tool_choice: Option<ToolChoice>, ) -> Result<()>
Bind tools to the model.
This method returns a Runnable that can call tools. The default implementation returns an error indicating that tool binding is not supported.
§Arguments
- tools - Sequence of tools to bind to the model.
- tool_choice - Optional tool choice configuration.
§Returns
A Result with error indicating tools are not supported.
§Note
Provider implementations should override this method to return a configured model.
fn get_tool_definitions(
&self,
tools: &[Arc<dyn BaseTool>],
) -> Vec<ToolDefinition>
fn get_tool_definitions( &self, tools: &[Arc<dyn BaseTool>], ) -> Vec<ToolDefinition>
Get tool definitions from tools.
Helper method to convert tools to their definitions.
fn stream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn stream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Generate a streaming response from the model.
This is the main streaming API. It yields AIMessageChunks.
Providers should override _stream for native streaming support.
§Arguments
- input - The input to the model (string, messages, or PromptValue).
- stop - Optional stop sequences.
§Returns
A stream of AIMessageChunks.
fn astream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
fn astream<'life0, 'async_trait>(
&'life0 self,
input: LanguageModelInput,
stop: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<AIMessageChunkStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
Async stream the model output.
This is the async version of stream. It yields AIMessageChunks.
Providers should override _astream for native async streaming support.
§Arguments
- input - The input to the model (string, messages, or PromptValue).
- stop - Optional stop sequences.
§Returns
A stream of AIMessageChunks.
fn stream_generations<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
fn stream_generations<'life0, 'life1, 'async_trait>(
&'life0 self,
messages: Vec<BaseMessage>,
stop: Option<Vec<String>>,
run_manager: Option<&'life1 CallbackManagerForLLMRun>,
) -> Pin<Box<dyn Future<Output = Result<ChatGenerationStream>> + Send + 'async_trait>>where
Self: Sync + 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait,
Stream ChatGenerationChunk objects from the model.
This is a lower-level streaming API that yields ChatGenerationChunks directly.
Most users should use stream() or astream() instead.
§Arguments
- messages - The conversation history.
- stop - Optional stop sequences.
- run_manager - Optional callback manager for the run.
§Returns
A stream of ChatGenerationChunks.
fn get_chat_ls_params(&self, stop: Option<&[String]>) -> LangSmithParams
fn get_chat_ls_params(&self, stop: Option<&[String]>) -> LangSmithParams
Get standard params for tracing.
fn to_dict(&self) -> HashMap<String, Value>
fn to_dict(&self) -> HashMap<String, Value>
Get a dictionary representation of the model.
Returns identifying parameters plus the model type.
fn with_structured_output(
&self,
_schema: Value,
_include_raw: bool,
) -> Result<()>
fn with_structured_output( &self, _schema: Value, _include_raw: bool, ) -> Result<()>
Create a wrapper that structures model output using a schema.
This method returns a Runnable that formats outputs to match the given schema. The default implementation returns an error indicating that structured output is not supported.
§Arguments
- schema - The output schema (as a JSON value).
- include_raw - If true, include raw model response in output.
§Returns
A Result with error indicating structured output is not supported.
§Note
Provider implementations should override bind_tools first, as the default
implementation uses bind_tools internally.
fn get_identifying_params(&self) -> HashMap<String, Value>
fn get_identifying_params(&self) -> HashMap<String, Value>
Get the identifying parameters for this model.
Returns a map of parameters that uniquely identify this model instance.