1// Copyright 2024-2025 Aprio One AB, Sweden
2// Author: Kenneth Pernyer, kenneth@aprio.one
3// SPDX-License-Identifier: MIT
4// See LICENSE file in the project root for full license information.
5
6//! Capability adapters for the Converge runtime.
7//!
8//! > **Providers produce observations, never decisions.**
9//! > **Converge converges; providers adapt.**
10//!
11//! This crate provides capability adapters (providers) that connect Converge
12//! workflows to external systems. Providers implement traits defined in
//! `converge-core`/`converge-traits` and return structured observations with provenance.
14//!
15//! # What Is a Provider?
16//!
17//! A provider is an **adapter** that:
18//! - Implements capability traits (`LlmProvider`, `Embedding`, `VectorRecall`, etc.)
19//! - Returns observations (not facts, not decisions)
20//! - Includes provenance metadata for tracing
21//! - Is stateless (no hidden lifecycle state)
22//!
23//! A provider is **NOT**:
24//! - An agent (agents live in `converge-core`)
25//! - Orchestration (no workflows, no scheduling)
26//! - Domain logic (business rules live in `converge-domain`)
27//!
28//! # Available Providers
29//!
30//! ## Remote Providers
31//! - [`AnthropicProvider`] - Claude API (Anthropic)
32//! - [`OpenAiProvider`] - GPT-4, GPT-3.5 (`OpenAI`)
33//! - [`GeminiProvider`] - Gemini Pro (Google)
34//! - [`PerplexityProvider`] - Perplexity AI
35//! - [`QwenProvider`] - Qwen models (Alibaba Cloud)
36//! - [`OpenRouterProvider`] - Multi-provider aggregator
//! - [`MinMaxProvider`] - MiniMax AI
38//! - [`GrokProvider`] - Grok (xAI)
39//! - [`MistralProvider`] - Mistral AI
40//! - [`DeepSeekProvider`] - `DeepSeek` AI
41//! - [`BaiduProvider`] - Baidu ERNIE
42//! - [`ZhipuProvider`] - Zhipu GLM
43//! - [`KimiProvider`] - Kimi (Moonshot AI)
44//! - [`ApertusProvider`] - Apertus (Switzerland, EU digital sovereignty)
45//!
46//! ## Local Providers
47//! - [`OllamaProvider`] - Local models via Ollama (Qwen, Llama, Mistral, etc.)
48//!
49//! # Prompt Structuring
50//!
51//! This crate provides provider-specific prompt structuring and optimization:
52//!
53//! - [`ProviderPromptBuilder`]: Builds prompts optimized for specific providers
54//! - [`StructuredResponseParser`]: Parses structured responses (XML/JSON)
55//! - Helper functions: [`build_claude_prompt`], [`build_openai_prompt`]
56//!
57//! # Examples
58//!
59//! ## Using Anthropic (Claude)
60//!
61//! ```ignore
62//! use converge_provider::{AnthropicProvider, build_claude_prompt, StructuredResponseParser};
63//! use converge_traits::llm::{LlmProvider, LlmRequest};
64//! use converge_core::prompt::{AgentRole, OutputContract, PromptContext};
65//! use converge_core::context::ContextKey;
66//!
67//! let provider = AnthropicProvider::from_env("claude-sonnet-4-6")?;
68//!
69//! // Build optimized prompt with XML structure
70//! let prompt = build_claude_prompt(
71//!     AgentRole::Proposer,
72//!     "extract-competitors",
73//!     PromptContext::new(),
74//!     OutputContract::new("proposed-fact", ContextKey::Competitors),
75//!     vec![],
76//! );
77//!
78//! let response = provider.complete(&LlmRequest::new(prompt))?;
79//!
80//! // Parse structured XML response
81//! let proposals = StructuredResponseParser::parse_claude_xml(
82//!     &response,
83//!     ContextKey::Competitors,
84//!     "anthropic",
85//! );
86//! ```
87//!
88//! ## Using `OpenAI`
89//!
90//! ```ignore
91//! use converge_provider::OpenAiProvider;
92//! use converge_traits::llm::{LlmProvider, LlmRequest};
93//!
94//! let provider = OpenAiProvider::from_env("gpt-4")?;
95//! let response = provider.complete(&LlmRequest::new("Hello!"))?;
96//! ```
97//!
98//! ## Using `OpenRouter` (Multi-Provider)
99//!
100//! ```ignore
101//! use converge_provider::OpenRouterProvider;
102//! use converge_traits::llm::{LlmProvider, LlmRequest};
103//!
104//! // Access any provider through OpenRouter
105//! let provider = OpenRouterProvider::from_env("anthropic/claude-3-opus")?;
106//! let response = provider.complete(&LlmRequest::new("Hello!"))?;
107//! ```
108
109// Core contract types
110pub mod contract;
111
112// LLM Backend implementations (unified LlmBackend trait from converge-core)
113pub mod llm;
114
115// LLM providers (simple LlmProvider trait)
116#[cfg(feature = "anthropic")]
117mod anthropic;
118#[cfg(feature = "apertus")]
119mod apertus;
120#[cfg(feature = "baidu")]
121mod baidu;
122
123// Search providers
124#[cfg(feature = "brave")]
125pub mod brave;
126
127// OCR / Document AI providers
128pub mod ocr;
129mod capability_registry;
130mod common;
131#[cfg(feature = "deepseek")]
132mod deepseek;
133mod factory;
134mod fake;
135#[cfg(feature = "gemini")]
136mod gemini;
137#[cfg(feature = "grok")]
138mod grok;
139#[cfg(feature = "kimi")]
140mod kimi;
141#[cfg(feature = "minmax")]
142mod minmax;
143#[cfg(feature = "mistral")]
144mod mistral;
145mod model_selection;
146#[cfg(feature = "ollama")]
147mod ollama;
148#[cfg(feature = "openai")]
149mod openai;
150#[cfg(feature = "openai")]
151mod openrouter;
152#[cfg(feature = "perplexity")]
153mod perplexity;
154mod prompt;
155#[cfg(feature = "qwen")]
156mod qwen;
157#[cfg(feature = "zhipu")]
158mod zhipu;
159
160// Patent providers
161#[cfg(feature = "patent")]
162pub mod patent;
163
164// Tool integration (MCP, OpenAPI, GraphQL)
165pub mod tools;
166
167// LinkedIn providers
168#[cfg(feature = "linkedin")]
169mod linkedin;
170
171// Capability providers
172pub mod embedding;
173pub mod graph;
174#[cfg(feature = "registry")]
175pub mod registry_loader;
176pub mod reranker;
177pub mod vector;
178
179// Re-exports
180#[cfg(feature = "anthropic")]
181pub use anthropic::AnthropicProvider;
182#[cfg(feature = "apertus")]
183pub use apertus::ApertusProvider;
184#[cfg(feature = "baidu")]
185pub use baidu::BaiduProvider;
186pub use capability_registry::{
187    CapabilityRegistry, CapabilityRequirements, SearchProviderMeta, WebSearchRequirements,
188};
189pub use common::{
190    ChatCompletionRequest, ChatCompletionResponse, ChatMessage, ChatUsage, HttpProviderConfig,
191    OpenAiCompatibleProvider, OpenAiStyleError, OpenAiStyleErrorDetail,
192    chat_response_to_llm_response, handle_openai_style_error, make_chat_completion_request,
193    parse_finish_reason,
194};
195#[cfg(feature = "deepseek")]
196pub use deepseek::DeepSeekProvider;
197pub use factory::{can_create_provider, create_provider, create_tool_aware_provider};
198#[cfg(feature = "gemini")]
199pub use gemini::GeminiProvider;
200#[cfg(feature = "grok")]
201pub use grok::GrokProvider;
202#[cfg(feature = "kimi")]
203pub use kimi::KimiProvider;
204#[cfg(feature = "minmax")]
205pub use minmax::MinMaxProvider;
206#[cfg(feature = "mistral")]
207pub use mistral::MistralProvider;
208pub use model_selection::{
209    FitnessBreakdown, ModelMetadata, ModelSelector, ProviderRegistry, RejectionReason,
210    SelectionResult, is_brave_available, is_provider_available,
211};
212#[cfg(feature = "ollama")]
213pub use ollama::{
214    DEFAULT_OLLAMA_URL, ModelInfo as OllamaModelInfo, ModelListEntry as OllamaModelEntry,
215    OllamaProvider,
216};
217#[cfg(feature = "openai")]
218pub use openai::OpenAiProvider;
219#[cfg(feature = "openai")]
220pub use openrouter::OpenRouterProvider;
221#[cfg(feature = "perplexity")]
222pub use perplexity::PerplexityProvider;
223pub use prompt::{
224    ProviderPromptBuilder, StructuredResponseParser, build_claude_prompt, build_openai_prompt,
225};
226#[cfg(feature = "qwen")]
227pub use qwen::QwenProvider;
228#[cfg(feature = "zhipu")]
229pub use zhipu::ZhipuProvider;
230
231// Testing utilities
232pub use fake::FakeProvider;
233
234// Contract types (re-exported for convenience)
235pub use contract::{
236    CallTimer, Capability, ProviderCallContext, ProviderMeta, ProviderObservation, Region,
237    TokenUsage, canonical_hash,
238};
239
240// LLM Backend (unified LlmBackend trait implementations)
241#[cfg(feature = "anthropic")]
242pub use llm::AnthropicBackend;
243
244// Patent providers
245#[cfg(feature = "patent")]
246pub use patent::{
247    CompositePatentProvider, PatentOperator, PatentSearchProvider, PatentSearchRequest,
248    PatentSearchResponse, PatentSearchResult, StubPatentProvider,
249};
250
251// LinkedIn providers
252#[cfg(feature = "linkedin")]
253pub use linkedin::{LinkedInApiProvider, LinkedInGetRequest, LinkedInProvider, StubLinkedInProvider};
254
255// Search providers
256#[cfg(feature = "brave")]
257pub use brave::{
258    BraveCapability, BraveSearchError, BraveSearchProvider, BraveSearchRequest,
259    BraveSearchResponse, BraveSearchResult,
260};
261
262
263// OCR / Document AI providers
264pub use ocr::{
265    // Core types
266    OcrProvider, OcrRequest, OcrResult, OcrError, OcrInput, OcrOutputFormat,
267    OcrTable, OcrImage, OcrSpan,
268    // Provenance & tracing
269    OcrProvenance, OcrPreprocessing, OcrConfidence,
270    compute_hash, with_trace_hashes,
271    // Cloud providers
272    MistralOcrProvider, DeepSeekOcrProvider, LightOnOcrProvider,
273    // Local providers (stubs for now)
274    TesseractOcrProvider, TesseractConfig, TesseractOutputFormat,
275};
276
277// Tool integration
278pub use tools::{
279    GraphQlConfig, GraphQlConverter, GraphQlOperationType, InputSchema, InlineToolConfig,
280    McpClient, McpClientBuilder, McpServerConfig, McpTransport, McpTransportType, OpenApiConfig,
281    OpenApiConverter, ParsedToolCall, SourceFilter, ToolAwareProvider, ToolAwareResponse, ToolCall,
282    ToolDefinition, ToolError, ToolErrorKind, ToolFormat, ToolHandler, ToolRegistry, ToolResult,
283    ToolResultContent, ToolSource, ToolsConfig, ToolsConfigError,
284};