/// Core LLM abstractions re-exported from `echo_core::llm`.
pub mod core {
    pub use echo_core::llm::*;

    /// Request/response types from `echo_core::llm::types`.
    pub mod types {
        pub use echo_core::llm::types::*;
    }
}

/// Provider integrations re-exported from `echo_integration::providers`.
pub mod integration {
    pub use echo_integration::providers::*;
}

/// Shared LLM request/response types.
pub mod types {
    pub use echo_core::llm::types::*;
}

/// Provider configuration types.
pub mod config {
    pub use echo_integration::providers::config::*;
}

/// Concrete provider clients.
pub mod providers {
    pub use echo_integration::providers::anthropic::AnthropicClient;
    pub use echo_integration::providers::ollama::OllamaClient;
    pub use echo_integration::providers::openai::{DefaultLlmClient, OpenAiClient};
}
use futures::Stream;
use reqwest::Client;
use reqwest::header::HeaderMap;
use std::sync::Arc;
use tokio_util::sync::CancellationToken;

// Flat re-exports at the module root for convenience; the same items are also
// reachable through the nested modules above.
pub use echo_core::llm::{ChatChunk, ChatRequest, ChatResponse, LlmClient};
pub use echo_integration::providers::anthropic::AnthropicClient;
pub use echo_integration::providers::ollama::OllamaClient;
pub use echo_integration::providers::openai::{DefaultLlmClient, OpenAiClient};
pub use echo_integration::providers::ProviderFactory;
pub use config::{LlmConfig, LlmProvider};
pub use types::{JsonSchemaSpec, Message as LlmMessage, ResponseFormat, ToolDefinition};
pub(crate) use types::{ChatCompletionChunk, ChatCompletionResponse, Message};
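
/// Assembles the HTTP request headers for the given model configuration.
///
/// Thin wrapper around `echo_integration::providers::openai::assemble_req_header`.
///
/// # Example
///
/// A minimal sketch; `model_config` is assumed to be a `config::ModelConfig`
/// loaded elsewhere.
///
/// ```ignore
/// let headers = assemble_req_header(&model_config)?;
/// ```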
pub fn assemble_req_header(model: &config::ModelConfig) -> echo_core::error::Result<HeaderMap> {
    echo_integration::providers::openai::assemble_req_header(model)
}
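
/// Performs a single chat-completion call against the OpenAI-compatible
/// provider and returns the full response.
///
/// Thin wrapper around `echo_integration::providers::openai::chat`.
///
/// # Example
///
/// A minimal sketch: the crate path is elided, the model name is a
/// placeholder, and `messages` is assumed to be a `Vec<LlmMessage>` built
/// elsewhere.
///
/// ```ignore
/// let client = std::sync::Arc::new(reqwest::Client::new());
/// let response = chat(
///     client,
///     "gpt-4o-mini",  // placeholder model name
///     &messages,
///     Some(0.2),      // temperature
///     Some(1024),     // max_tokens
///     Some(false),    // stream flag forwarded to the provider
///     None,           // tools
///     None,           // tool_choice
///     None,           // response_format
/// )
/// .await?;
/// ```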
#[allow(clippy::too_many_arguments)]
pub async fn chat(
    client: Arc<Client>,
    model_name: &str,
    messages: &[Message],
    temperature: Option<f32>,
    max_tokens: Option<u32>,
    stream: Option<bool>,
    tools: Option<Vec<ToolDefinition>>,
    tool_choice: Option<String>,
    response_format: Option<ResponseFormat>,
) -> echo_core::error::Result<ChatCompletionResponse> {
    echo_integration::providers::openai::chat(
        client,
        model_name,
        messages,
        temperature,
        max_tokens,
        stream,
        tools,
        tool_choice,
        response_format,
    )
    .await
}
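
/// Performs a streaming chat-completion call against the OpenAI-compatible
/// provider, yielding incremental `ChatCompletionChunk`s. The optional
/// `cancel_token` is forwarded to the provider and can be used to cancel the
/// in-flight request.
///
/// Thin wrapper around `echo_integration::providers::openai::stream_chat`.
///
/// # Example
///
/// A minimal sketch: the crate path is elided, the model name is a
/// placeholder, and `messages` is assumed to be a `Vec<LlmMessage>` built
/// elsewhere.
///
/// ```ignore
/// use futures::StreamExt;
///
/// let client = std::sync::Arc::new(reqwest::Client::new());
/// let cancel = tokio_util::sync::CancellationToken::new();
/// let stream = stream_chat(
///     client,
///     "gpt-4o-mini",        // placeholder model name
///     messages,
///     Some(0.2),            // temperature
///     Some(1024),           // max_tokens
///     None,                 // tools
///     None,                 // tool_choice
///     None,                 // response_format
///     Some(cancel.clone()), // cancellation token
/// )
/// .await?;
///
/// futures::pin_mut!(stream);
/// while let Some(chunk) = stream.next().await {
///     let chunk = chunk?; // each item is a `Result<ChatCompletionChunk>`
///     // consume the incremental chunk here
/// }
/// ```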
#[allow(clippy::too_many_arguments)]
pub async fn stream_chat(
    client: Arc<Client>,
    model_name: &str,
    messages: Vec<Message>,
    temperature: Option<f32>,
    max_tokens: Option<u32>,
    tools: Option<Vec<ToolDefinition>>,
    tool_choice: Option<String>,
    response_format: Option<ResponseFormat>,
    cancel_token: Option<CancellationToken>,
) -> echo_core::error::Result<
    impl Stream<Item = echo_core::error::Result<ChatCompletionChunk>> + use<>,
> {
    echo_integration::providers::openai::stream_chat(
        client,
        model_name,
        messages,
        temperature,
        max_tokens,
        tools,
        tool_choice,
        response_format,
        cancel_token,
    )
    .await
}