
Crate llmg_providers

Crate llmg_providers 

Source
Expand description

§LLMG Providers

LLM provider implementations for the LLMG ecosystem. Each module contains a client that implements the `llmg_core::provider::Provider` trait.

§Usage

use llmg_providers::openai::OpenAiClient;
use llmg_core::provider::Provider;

let client = OpenAiClient::from_env().unwrap();

Re-exports§

pub use ai21::Ai21Client;
pub use aiml::AimlClient;
pub use aleph_alpha::AlephAlphaClient;
pub use anthropic::AnthropicClient;
pub use antigravity::AntigravityClient;
pub use anyscale::AnyscaleClient;
pub use apertis_ai::ApertisAiClient;
pub use aws_sagemaker::AwsSagemakerClient;
pub use azure::AzureOpenAiClient;
pub use azure_ai::AzureAiClient;
pub use bedrock::BedrockClient;
pub use cerebras::CerebrasClient;
pub use chatjimmy::ChatJimmyClient;
pub use chutes::ChutesClient;
pub use cohere::CohereClient;
pub use comet::CometClient;
pub use compactifai::CompactifAiClient;
pub use custom_llm_server::CustomLlmServerClient;
pub use deepgram::DeepgramClient;
pub use deepinfra::DeepInfraClient;
pub use deepseek::DeepseekClient;
pub use docker_runner::DockerRunnerClient;
pub use elevenlabs::ElevenLabsClient;
pub use fal_ai::FalAiClient;
pub use featherless_ai::FeatherlessAiClient;
pub use firecrawl::FirecrawlClient;
pub use fireworks_ai::FireworksAiClient;
pub use friendliai::FriendliaiClient;
pub use github_copilot::GitHubCopilotClient;
pub use groq::GroqClient;
pub use helicone::HeliconeClient;
pub use heroku::HerokuClient;
pub use huggingface::HuggingFaceClient;
pub use hyperbolic::HyperbolicClient;
pub use infinity::InfinityClient;
pub use jina::JinaClient;
pub use langgraph::LangGraphClient;
pub use litellm_proxy::LitellmProxyClient;
pub use llamafile::LlamafileClient;
pub use lm_studio::LmStudioClient;
pub use meta_llama::MetaLlamaClient;
pub use milvus::MilvusClient;
pub use minimax::MiniMaxClient;
pub use mistral::MistralClient;
pub use nano_gpt::NanoGptClient;
pub use nscale::NscaleClient;
pub use octoai::OctoAiClient;
pub use ollama::OllamaClient;
pub use oobabooga::OobaboogaClient;
pub use openai::OpenAiClient;
pub use openrouter::OpenRouterClient;
pub use perplexity::PerplexityClient;
pub use petals::PetalsClient;
pub use poe::PoeClient;
pub use polly::PollyClient;
pub use publicai::PublicAiClient;
pub use pydantic_ai_agent::PydanticAiAgentClient;
pub use runway::RunwayClient;
pub use sambanova::SambaNovaClient;
pub use stability::StabilityAiClient;
pub use synthetic::SyntheticClient;
pub use together_ai::TogetherAiClient;
pub use triton::TritonClient;
pub use v0::V0Client;
pub use vertex_ai::VertexAiClient;
pub use vllm::VllmClient;
pub use volcano::VolcanoClient;
pub use voyageai::VoyageaiClient;
pub use watsonx::WatsonxClient;
pub use xai::XaiClient;
pub use xinference::XinferenceClient;
pub use z_ai::ZaiClient;

Modules§

ai21
aiml
aleph_alpha
anthropic
antigravity
Google Antigravity API client for LLMG
anyscale
apertis_ai
aws_sagemaker
azure
azure_ai
bedrock
cerebras
chatjimmy
ChatJimmy API client for LLMG
chutes
cohere
Cohere API client for LLMG
comet
compactifai
custom_llm_server
Custom LLM Server provider for LLMG
deepgram
deepinfra
deepseek
docker_runner
Docker Model Runner provider for LLMG
elevenlabs
fal_ai
featherless_ai
firecrawl
fireworks_ai
friendliai
github_copilot
GitHub Copilot API client for LLMG
groq
helicone
heroku
huggingface
hyperbolic
infinity
jina
langgraph
litellm_proxy
LiteLLM Proxy client for LLMG
llamafile
Llamafile local LLM provider for LLMG
lm_studio
LM Studio local LLM provider for LLMG
meta_llama
milvus
minimax
mistral
nano_gpt
nscale
octoai
ollama
oobabooga
oobabooga (Text Generation WebUI) local LLM provider for LLMG
openai
openrouter
OpenRouter API client for LLMG
perplexity
petals
Petals decentralized LLM provider for LLMG
poe
polly
publicai
pydantic_ai_agent
runway
sambanova
stability
synthetic
together_ai
triton
Triton Inference Server provider for LLMG
utils
v0
vertex_ai
vllm
vLLM local LLM provider for LLMG
volcano
voyageai
watsonx
xai
xinference
z_ai