// mofa_foundation/llm/mod.rs
1//! LLM 模块
2//!
3//! 提供 LLM (Large Language Model) 集成支持
4//!
5//! # 架构
6//!
7//! ```text
8//! ┌─────────────────────────────────────────────────────────────────────┐
9//! │ LLM 模块架构 │
10//! ├─────────────────────────────────────────────────────────────────────┤
11//! │ │
12//! │ ┌─────────────┐ ┌─────────────┐ ┌─────────────────────────┐ │
13//! │ │ LLMClient │───▶│ Provider │───▶│ 具体实现 │ │
14//! │ │ (高级API) │ │ (trait) │ │ - OpenAI │ │
15//! │ └─────────────┘ └─────────────┘ │ - Anthropic │ │
16//! │ │ │ - Ollama │ │
17//! │ ▼ │ - 自定义... │ │
18//! │ ┌─────────────┐ └─────────────────────────┘ │
19//! │ │ ChatSession │ │
20//! │ │ (会话管理) │ │
21//! │ └─────────────┘ │
22//! │ │ │
23//! │ ▼ │
24//! │ ┌─────────────┐ ┌─────────────┐ │
25//! │ │ LLMPlugin │───▶│ AgentPlugin │ ← 集成到 MoFA Agent │
26//! │ │ (插件封装) │ │ (trait) │ │
27//! │ └─────────────┘ └─────────────┘ │
28//! │ │ │
29//! │ ▼ │
30//! │ ┌─────────────────────────────────────────────────────────────┐ │
31//! │ │ 高级 API │ │
32//! │ ├─────────────────────────────────────────────────────────────┤ │
33//! │ │ AgentWorkflow │ 多 Agent 工作流编排 │ │
34//! │ │ AgentTeam │ 团队协作模式 (链式/并行/辩论/监督) │ │
35//! │ │ Pipeline │ 函数式流水线 API │ │
36//! │ └─────────────────────────────────────────────────────────────┘ │
37//! │ │
38//! └─────────────────────────────────────────────────────────────────────┘
39//! ```
40//!
41//! # 快速开始
42//!
43//! ## 1. 实现自定义 LLM Provider
44//!
45//! ```rust,ignore
46//! use mofa_foundation::llm::{LLMProvider, ChatCompletionRequest, ChatCompletionResponse, LLMResult};
47//!
48//! struct MyLLMProvider {
49//! api_key: String,
50//! }
51//!
52//! #[async_trait::async_trait]
53//! impl LLMProvider for MyLLMProvider {
54//! fn name(&self) -> &str {
55//! "my-llm"
56//! }
57//!
58//! fn default_model(&self) -> &str {
59//! "my-model-v1"
60//! }
61//!
62//! async fn chat(&self, request: ChatCompletionRequest) -> LLMResult<ChatCompletionResponse> {
63//! // 实现具体的 API 调用逻辑
64//! todo!()
65//! }
66//! }
67//! ```
68//!
69//! ## 2. 使用 LLMClient 进行对话
70//!
71//! ```rust,ignore
72//! use mofa_foundation::llm::{LLMClient, ChatMessage};
73//! use std::sync::Arc;
74//!
75//! let provider = Arc::new(MyLLMProvider::new("api-key"));
76//! let client = LLMClient::new(provider);
77//!
78//! // 简单问答
79//! let answer = client.ask("What is Rust?").await?;
80//!
81//! // 带系统提示的对话
82//! let response = client
83//! .chat()
84//! .system("You are a helpful coding assistant.")
85//! .user("How do I read a file in Rust?")
86//! .temperature(0.7)
87//! .max_tokens(1000)
88//! .send()
89//! .await?;
90//!
//! println!("{}", response.content().unwrap());
92//! ```
93//!
94//! ## 3. 使用工具调用
95//!
96//! ```rust,ignore
//! use mofa_foundation::llm::{LLMClient, LLMError, LLMResult, Tool, ToolExecutor};
98//! use serde_json::json;
99//!
100//! // 定义工具
101//! let weather_tool = Tool::function(
102//! "get_weather",
103//! "Get weather for a location",
104//! json!({
105//! "type": "object",
106//! "properties": {
107//! "location": { "type": "string" }
108//! },
109//! "required": ["location"]
110//! })
111//! );
112//!
113//! // 实现工具执行器
114//! struct MyToolExecutor;
115//!
116//! #[async_trait::async_trait]
117//! impl ToolExecutor for MyToolExecutor {
118//! async fn execute(&self, name: &str, arguments: &str) -> LLMResult<String> {
119//! match name {
120//! "get_weather" => Ok(r#"{"temp": 22, "condition": "sunny"}"#.to_string()),
121//! _ => Err(LLMError::Other("Unknown tool".to_string()))
122//! }
123//! }
124//!
125//! async fn available_tools(&self) -> LLMResult<Vec<Tool>> {
126//! Ok(vec![weather_tool.clone()])
127//! }
128//! }
129//!
130//! // 使用自动工具调用
131//! let response = client
132//! .chat()
133//! .system("You can use tools to help answer questions.")
134//! .user("What's the weather in Tokyo?")
135//! .tool(weather_tool)
136//! .with_tool_executor(Arc::new(MyToolExecutor))
137//! .send_with_tools()
138//! .await?;
139//! ```
140//!
141//! ## 4. 作为插件集成到 Agent
142//!
143//! ```rust,ignore
144//! use mofa_foundation::llm::{LLMPlugin, LLMConfig};
145//! use mofa_sdk::kernel::MoFAAgent;
146//! use mofa_sdk::runtime::AgentBuilder;
147//!
148//! // 创建 LLM 插件
149//! let llm_plugin = LLMPlugin::new("openai-llm", provider);
150//!
151//! // 添加到 Agent
152//! let runtime = AgentBuilder::new("my-agent", "My Agent")
153//! .with_plugin(Box::new(llm_plugin))
154//! .with_agent(agent)
155//! .await?;
156//! ```
157//!
158//! ## 5. 使用会话管理
159//!
160//! ```rust,ignore
161//! use mofa_foundation::llm::{LLMClient, ChatSession};
162//!
163//! let client = LLMClient::new(provider);
164//! let mut session = ChatSession::new(client)
165//! .with_system("You are a helpful assistant.");
166//!
167//! // 多轮对话
168//! let r1 = session.send("Hello!").await?;
169//! let r2 = session.send("What did I just say?").await?; // 会记住上下文
170//!
171//! // 清空历史
172//! session.clear();
173//! ```
174//!
175//! # 高级 API
176//!
177//! ## 6. Agent 工作流编排 (AgentWorkflow)
178//!
179//! 创建复杂的多 Agent 工作流,支持条件分支、并行执行、聚合等。
180//!
181//! ```rust,ignore
182//! use mofa_foundation::llm::{AgentWorkflow, LLMAgent};
183//! use std::sync::Arc;
184//!
185//! // 创建简单的 Agent 链
186//! let workflow = agent_chain("content-pipeline", vec![
187//! ("researcher", researcher_agent.clone()),
188//! ("writer", writer_agent.clone()),
189//! ("editor", editor_agent.clone()),
190//! ]);
191//!
192//! let result = workflow.run("Write an article about Rust").await?;
193//!
194//! // 使用构建器创建更复杂的工作流
195//! let workflow = AgentWorkflow::new("complex-pipeline")
196//! .add_agent("analyzer", analyzer_agent)
197//! .add_agent("writer", writer_agent)
198//! .add_llm_router("router", router_agent, vec!["technical", "creative"])
199//! .connect("start", "analyzer")
200//! .connect("analyzer", "router")
201//! .connect_on("router", "technical", "technical")
202//! .connect_on("router", "creative", "creative")
203//! .build();
204//! ```
205//!
206//! ## 7. Agent 团队协作 (AgentTeam)
207//!
208//! 支持多种协作模式:链式、并行、辩论、监督、MapReduce。
209//!
210//! ```rust,ignore
211//! use mofa_foundation::llm::{AgentTeam, TeamPattern, AgentRole};
212//!
213//! // 使用预定义的团队模式
214//! let team = content_creation_team(researcher, writer, editor);
215//! let article = team.run("Write about AI safety").await?;
216//!
217//! // 自定义团队
218//! let team = AgentTeam::new("analysis-team")
219//! .add_member("expert1", expert1_agent)
220//! .add_member("expert2", expert2_agent)
221//! .add_member("synthesizer", synthesizer_agent)
222//! .with_pattern(TeamPattern::MapReduce)
223//! .with_aggregate_prompt("Synthesize: {results}")
224//! .build();
225//!
226//! // 辩论模式
227//! let debate = debate_team(agent1, agent2, 3); // 3 轮辩论
228//! let conclusion = debate.run("Is Rust better than Go?").await?;
229//! ```
230//!
231//! ## 8. 函数式流水线 (Pipeline)
232//!
233//! 提供简洁的函数式 API 构建 Agent 处理流程。
234//!
235//! ```rust,ignore
236//! use mofa_foundation::llm::Pipeline;
237//!
238//! // 简单流水线
239//! let result = Pipeline::new()
240//! .with_agent(translator)
241//! .map(|s| s.to_uppercase())
242//! .with_agent(summarizer)
243//! .run("Translate and summarize this text")
244//! .await?;
245//!
246//! // 带模板的流水线
247//! let result = Pipeline::new()
248//! .with_agent_template(agent, "Please analyze: {input}")
249//! .map(|s| format!("Analysis: {}", s))
250//! .run("Some data to analyze")
251//! .await?;
252//!
253//! // 流式流水线
254//! let stream = StreamPipeline::new(agent)
255//! .with_template("Tell me about {input}")
256//! .run_stream("Rust programming")
257//! .await?;
258//! ```
259
// Core building blocks: agent, client, plugin, provider abstraction,
// retry policy, tool execution/schema helpers, and shared types.
pub mod agent;
pub mod client;
pub mod plugin;
pub mod provider;
pub mod retry;
pub mod tool_executor;
pub mod tool_schema;
pub mod types;

// High-level APIs and built-in providers.
// NOTE(review): `anthropic`, `google`, and `openai` are concrete provider
// implementations, not high-level orchestration APIs — consider regrouping
// them under a dedicated "providers" section for clarity.
pub mod agent_workflow;
pub mod anthropic;
pub mod google;
pub mod multi_agent;
pub mod openai;
pub mod pipeline;

// Framework components: the agent run loop, execution context,
// background-task orchestration, and vision (image input) helpers.
pub mod agent_loop;
pub mod context;
pub mod task_orchestrator;
pub mod vision;

// Audio processing (speech-to-text transcription providers).
pub mod transcription;
285
// Re-export core types
pub use client::{ChatRequestBuilder, ChatSession, LLMClient, function_tool};
pub use plugin::{LLMCapability, LLMPlugin, MockLLMProvider};
pub use provider::{
    ChatStream, LLMConfig, LLMProvider, LLMRegistry, ModelCapabilities, ModelInfo, global_registry,
};
pub use retry::RetryExecutor;
pub use tool_executor::ToolExecutor;
pub use tool_schema::{normalize_schema, parse_schema, validate_schema};
pub use types::*;

// Re-export the standard LLM agent
pub use agent::{
    LLMAgent, LLMAgentBuilder, LLMAgentConfig, LLMAgentEventHandler, StreamEvent, TextStream,
    simple_llm_agent,
};

// Re-export agent_from_config
// NOTE(review): the original comment said "when the openai feature is
// enabled", but there is no #[cfg(feature = "...")] guard on this re-export.
// Confirm any gating lives inside the `agent` module itself; otherwise builds
// without that feature would fail to resolve this name.
pub use agent::agent_from_config;

// Re-export OpenAI provider
// NOTE(review): originally described as "when enabled", yet this re-export is
// unconditional here — verify whether `openai` is feature-gated at the module
// declaration or inside the module.
pub use openai::{OpenAIConfig, OpenAIProvider};
// Re-export Anthropic provider
pub use anthropic::{AnthropicConfig, AnthropicProvider};
// Re-export Google Gemini provider
pub use google::{GeminiConfig, GeminiProvider};

// Re-export high-level orchestration APIs (workflow graph, team patterns,
// functional pipeline).
pub use agent_workflow::{
    AgentEdge, AgentNode, AgentNodeType, AgentValue, AgentWorkflow, AgentWorkflowBuilder,
    AgentWorkflowContext, agent_chain, agent_parallel, agent_router,
};
pub use multi_agent::{
    AgentMember, AgentRole, AgentTeam, AgentTeamBuilder, TeamPattern, analysis_team,
    code_review_team, content_creation_team, debate_team,
};
pub use pipeline::{
    Pipeline, StreamPipeline, agent_pipe, agent_pipe_with_templates, ask_with_template, batch_ask,
    quick_ask,
};

// Re-export framework components
pub use agent_loop::{AgentLoop, AgentLoopConfig, AgentLoopRunner, SimpleToolExecutor};
pub use context::{AgentContextBuilder, AgentIdentity, NoOpSkillsManager, SkillsManager};
pub use task_orchestrator::{
    BackgroundTask, TaskOrchestrator, TaskOrchestratorConfig, TaskOrigin, TaskResult, TaskStatus,
};
pub use vision::{
    ImageDetailExt, build_vision_chat_message, build_vision_chat_message_single,
    build_vision_message, encode_image_data_url, encode_image_url, get_mime_type,
    image_url_from_string, image_url_with_detail, is_image_file,
};
// ImageDetail is already re-exported via types::*;

// Compatibility re-export for the older AgentLoopToolExecutor name.
// Deprecated alias kept so downstream code keeps compiling with a warning.
#[deprecated(
    note = "Use llm::ToolExecutor instead. AgentLoop now uses the unified ToolExecutor."
)]
pub use tool_executor::ToolExecutor as AgentLoopToolExecutor;

// Re-export transcription module
pub use transcription::{
    GroqTranscriptionProvider, OpenAITranscriptionProvider, TranscriptionProvider,
};