// mofa_ai/lib.rs
1//! MoFA API - Standard SDK for MoFA framework
2//!
3//! This crate provides a standardized API for the MoFA (Model-based Framework for Agents) framework.
4//!
5//! # Architecture Layers
6//!
7//! The SDK is organized into clear layers following microkernel architecture principles:
8//!
9//! ```text
10//! ┌─────────────────────────────────────────┐
11//! │ User Code │
12//! └─────────────────┬───────────────────────┘
13//! ↓
14//! ┌─────────────────────────────────────────┐
15//! │ SDK (Standard API Surface) │
16//! │ - kernel: Core abstractions │
17//! │ - runtime: Lifecycle management │
18//! │ - foundation: Business functionality │
19//! └─────────────────┬───────────────────────┘
20//! ```
21//!
22//! # Features
23//!
24//! - `dora` - Enable dora-rs runtime support for distributed dataflow
25//!
26//! For FFI bindings (Python, Kotlin, Swift, Java), use the `mofa-ffi` crate.
27//!
28//! # Quick Start
29//!
30//! ```toml
31//! mofa-sdk = "0.1"
32//! ```
33//!
34//! ```rust,ignore
35//! use mofa_sdk::kernel::{AgentInput, MoFAAgent};
36//! use mofa_sdk::runtime::run_agents;
37//!
38//! struct MyAgent;
39//!
40//! #[async_trait::async_trait]
41//! impl MoFAAgent for MyAgent {
42//! // implementation...
43//! }
44//!
45//! #[tokio::main]
46//! async fn main() -> anyhow::Result<()> {
47//! let outputs = run_agents(MyAgent, vec![AgentInput::text("Hello")]).await?;
48//! println!("{}", outputs[0].to_text());
49//! Ok(())
50//! }
51//! ```
52
53// =============================================================================
54// Kernel Layer - Core Abstractions
55// =============================================================================
56
57/// Core agent abstractions and extensions
58///
59/// This module provides the minimal core interfaces that all agents implement.
60/// Following microkernel principles, the core is kept minimal with optional
61/// extensions for additional capabilities.
62///
63/// # Core Trait
64///
65/// - `MoFAAgent`: The core agent interface (id, name, capabilities, execute, etc.)
66///
67/// # Extension Traits
68///
69/// - `AgentLifecycle`: pause, resume, interrupt
70/// - `AgentMessaging`: handle_message, handle_event
71/// - `AgentPluginSupport`: plugin management
72///
73/// # Example
74///
75/// ```rust,ignore
76/// use mofa_sdk::kernel::MoFAAgent;
77///
78/// #[async_trait::async_trait]
79/// impl MoFAAgent for MyAgent {
80/// fn id(&self) -> &str { "my-agent" }
81/// fn name(&self) -> &str { "My Agent" }
82/// // ... other methods
83/// }
84/// ```
pub mod kernel {
    //! Core abstractions and infrastructure from `mofa-kernel`.
    //!
    //! This module is a normalized, comprehensive facade over `mofa-kernel` with
    //! structured submodules and curated top-level re-exports.

    // ---------------------------------------------------------------------
    // Structured submodules (full coverage)
    // ---------------------------------------------------------------------

    /// Full re-export of `mofa_kernel::agent`.
    pub mod agent {
        pub use mofa_kernel::agent::*;
    }
    /// Full re-export of `mofa_kernel::message`.
    pub mod message {
        pub use mofa_kernel::message::*;
    }
    /// Full re-export of `mofa_kernel::bus`.
    pub mod bus {
        pub use mofa_kernel::bus::*;
    }
    /// Full re-export of `mofa_kernel::plugin`.
    pub mod plugin {
        pub use mofa_kernel::plugin::*;
    }
    /// Full re-export of `mofa_kernel::config`.
    pub mod config {
        pub use mofa_kernel::config::*;
    }
    /// Full re-export of `mofa_kernel::core`.
    pub mod core {
        pub use mofa_kernel::core::*;
    }
    /// Full re-export of `mofa_kernel::storage`.
    pub mod storage {
        pub use mofa_kernel::storage::*;
    }

    // ---------------------------------------------------------------------
    // Curated, commonly-used exports
    // ---------------------------------------------------------------------
    // NOTE: the agent-layer `AgentMessage` is re-exported as `CoreAgentMessage`
    // so it does not clash with the message-layer `AgentMessage` below.
    pub use mofa_kernel::agent::{
        AgentCapabilities, AgentCapabilitiesBuilder, AgentContext, AgentError, AgentFactory,
        AgentInput, AgentLifecycle, AgentMessage as CoreAgentMessage, AgentMessaging,
        AgentMetadata, AgentOutput, AgentPluginSupport, AgentRequirements,
        AgentRequirementsBuilder, AgentResult, AgentState, AgentStats, ChatCompletionRequest,
        ChatCompletionResponse, ChatMessage, ContextConfig, CoordinationPattern, Coordinator,
        DynAgent, ErrorCategory, ErrorContext, EventBuilder, EventBus, GlobalError, GlobalEvent,
        GlobalMessage, GlobalResult, HealthStatus, InputType, InterruptResult, LLMProvider, Memory,
        MemoryItem, MemoryStats, MemoryValue, Message, MessageContent, MessageMetadata,
        MessageRole, MoFAAgent, OutputContent, OutputType, Reasoner, ReasoningResult,
        ReasoningStep, ReasoningStepType, ReasoningStrategy, TokenUsage, Tool, ToolCall,
        ToolDefinition, ToolDescriptor, ToolInput, ToolMetadata, ToolResult, ToolUsage,
        execution_events, lifecycle, message_events, plugin_events, state_events,
    };

    // Core AgentConfig (runtime-level, lightweight)
    pub use mofa_kernel::core::AgentConfig;

    // Schema/config types for agent definitions. This `AgentConfig` is renamed
    // to `AgentSchemaConfig` to avoid clashing with the core `AgentConfig` above.
    pub use mofa_kernel::agent::config::{
        AgentConfig as AgentSchemaConfig, AgentType, ConfigFormat, ConfigLoader,
    };

    // Message-level events and task primitives (stream + scheduling included)
    pub use mofa_kernel::message::{
        AgentEvent, AgentMessage, SchedulingStatus, StreamControlCommand, StreamType, TaskPriority,
        TaskRequest, TaskStatus,
    };

    // Bus
    pub use mofa_kernel::bus::AgentBus;

    // Plugin primitives
    pub use mofa_kernel::plugin::{
        AgentPlugin, HotReloadConfig, PluginContext, PluginEvent, PluginMetadata, PluginResult,
        PluginState, PluginType, ReloadEvent, ReloadStrategy,
    };

    // Storage trait
    pub use mofa_kernel::Storage;
}
160
161// =============================================================================
162// Runtime Layer - Lifecycle and Execution
163// =============================================================================
164
165/// Agent lifecycle and execution management
166///
167/// This module provides runtime infrastructure for managing agent execution.
168///
169/// # Main Components
170///
171/// - `AgentBuilder`: Builder pattern for constructing agents
172/// - `SimpleRuntime`: Multi-agent coordination (non-dora)
173/// - `AgentRuntime`: Dora-rs integration (with `dora` feature)
174///
175/// # Example
176///
177/// ```rust,ignore
178/// use mofa_sdk::runtime::{AgentBuilder, SimpleRuntime};
179///
180/// let runtime = SimpleRuntime::new();
181/// runtime.register_agent(metadata, config, "worker").await?;
182/// ```
pub mod runtime {
    //! Runtime infrastructure re-exported from `mofa-runtime`.

    // Agent builder
    pub use mofa_runtime::AgentBuilder;

    // Simple runtime (non-dora multi-agent coordination)
    pub use mofa_runtime::SimpleRuntime;

    // Agent registry (runtime implementation)
    pub use mofa_runtime::agent::{AgentFactory, AgentRegistry, RegistryStats};

    // Agent runner (single-execution utilities)
    pub use mofa_runtime::runner::{
        AgentRunner, AgentRunnerBuilder, RunnerState, RunnerStats, run_agents,
    };

    // Framework-wide runtime configuration
    pub use mofa_runtime::config::FrameworkConfig;

    // Dora runtime (only available with dora feature)
    #[cfg(feature = "dora")]
    pub use mofa_runtime::{AgentRuntime, MoFARuntime};
}
204
205// =============================================================================
206// Agent Layer - Foundation Agent Building Blocks
207// =============================================================================
208
209/// Agent building blocks and concrete implementations (foundation layer)
pub mod agent {
    //! Full re-export of the foundation-layer agent building blocks.
    pub use mofa_foundation::agent::*;
}
213
214// =============================================================================
215// Prompt Layer - Prompt Composition & Management
216// =============================================================================
217
218/// Prompt templates, registries, and composition utilities
pub mod prompt {
    //! Full re-export of the foundation-layer prompt utilities.
    pub use mofa_foundation::prompt::*;
}
222
223// =============================================================================
224// Coordination Layer - Task Coordination
225// =============================================================================
226
227/// Coordination strategies and schedulers (foundation layer)
pub mod coordination {
    //! Full re-export of the foundation-layer coordination strategies.
    pub use mofa_foundation::coordination::*;
}
231
232// =============================================================================
233// Config Layer - Global Configuration
234// =============================================================================
235
236/// Global configuration facade (kernel + runtime + foundation)
pub mod config {
    //! Layered configuration facade.
    //!
    //! Exposes kernel, runtime, and foundation configuration under one roof,
    //! plus a curated set of commonly used top-level types.

    /// Kernel config helpers and loaders
    pub mod kernel {
        pub use mofa_kernel::agent::config::*;
        pub use mofa_kernel::config::*;
        // Renamed so it does not clash with `mofa_kernel::agent::config::AgentConfig`
        // pulled in by the glob above.
        pub use mofa_kernel::core::AgentConfig as CoreAgentConfig;
    }

    /// Runtime config
    pub mod runtime {
        pub use mofa_runtime::config::*;
    }

    /// Foundation YAML config
    pub mod foundation {
        pub use mofa_foundation::config::*;
    }

    // Curated top-level re-exports. `RuntimeConfig` is renamed to
    // `YamlRuntimeConfig` to distinguish it from runtime-layer config types.
    pub use mofa_foundation::config::{
        AgentInfo, AgentYamlConfig, LLMYamlConfig, RuntimeConfig as YamlRuntimeConfig, ToolConfig,
    };
    pub use mofa_runtime::config::FrameworkConfig;
}
261
262// =============================================================================
263// Foundation Layer - Business Functionality
264// =============================================================================
265
266/// Business functionality and concrete implementations
267///
268/// This module provides production-ready agent implementations and business logic.
269///
270/// # Modules
271///
272/// - `llm`: LLM integration (OpenAI, etc.)
273/// - `secretary`: Secretary agent pattern
274/// - `react`: ReAct (Reasoning + Acting) framework
275/// - `collaboration`: Multi-agent collaboration protocols
276/// - `persistence`: Database persistence
pub mod foundation {
    //! Aggregates the foundation-layer modules under a single namespace.
    //!
    //! Each item below is an alias of a top-level SDK module, so
    //! `foundation::llm` and `crate::llm` refer to the same module.
    pub use super::agent;
    pub use super::collaboration;
    pub use super::config;
    pub use super::coordination;
    pub use super::llm;
    pub use super::messaging;
    pub use super::persistence;
    pub use super::prompt;
    pub use super::react;
    pub use super::secretary;
    pub use super::workflow;
}
290
291// =============================================================================
292// Plugins (explicit module)
293// =============================================================================
294
pub mod plugins {
    //! Plugin system facade: curated re-exports from `mofa-plugins` plus
    //! kernel plugin primitives and hot-reload utilities.

    pub use mofa_plugins::{
        AgentPlugin,
        AudioPlaybackConfig,
        LLMPlugin,
        LLMPluginConfig,
        MemoryPlugin,
        MemoryStorage,
        MockTTSEngine,
        // Kernel plugin primitives
        PluginConfig,
        PluginContext,
        PluginEvent,
        PluginManager,
        PluginMetadata,
        PluginResult,
        PluginState,
        PluginType,
        RhaiPlugin,
        RhaiPluginConfig,
        RhaiPluginState,
        StoragePlugin,
        TTSCommand,
        TTSEngine,
        // TTS plugin types
        TTSPlugin,
        TTSPluginConfig,
        TextToSpeechTool,
        ToolCall,
        ToolDefinition,
        ToolExecutor,
        ToolPlugin,
        ToolPluginAdapter,
        ToolResult,
        VoiceInfo,
        adapt_tool,
        // TTS audio playback function
        play_audio,
        play_audio_async,
        // Runtime plugin creation helpers
        rhai_runtime,
        tool,
        tools,
        wasm_runtime,
    };

    pub use mofa_kernel::PluginPriority;

    // Re-export KokoroTTS when the kokoro feature is enabled
    #[cfg(feature = "kokoro")]
    pub use mofa_plugins::KokoroTTS;

    // Hot reload utilities
    pub mod hot_reload {
        pub use mofa_plugins::hot_reload::*;
    }
}
352
353// =============================================================================
354// Workflow (explicit module)
355// =============================================================================
356
pub mod workflow {
    //! Workflow orchestration module with LangGraph-inspired StateGraph API
    //!
    //! # StateGraph API (Recommended)
    //!
    //! The new StateGraph API provides a more intuitive way to build workflows:
    //!
    //! ```rust,ignore
    //! use mofa_sdk::workflow::{StateGraphImpl, AppendReducer, OverwriteReducer, StateGraph, START, END};
    //!
    //! let graph = StateGraphImpl::<MyState>::new("my_workflow")
    //!     .add_reducer("messages", Box::new(AppendReducer))
    //!     .add_node("process", Box::new(ProcessNode))
    //!     .add_edge(START, "process")
    //!     .add_edge("process", END)
    //!     .compile()?;
    //!
    //! let result = graph.invoke(initial_state, None).await?;
    //! ```
    //!
    //! # Legacy Workflow API
    //!
    //! The original WorkflowGraph API is still available for backward compatibility.

    // Re-export kernel workflow types (graph primitives, state, and control flow)
    pub use mofa_kernel::workflow::{
        Command, CompiledGraph, ControlFlow, END, EdgeTarget, GraphConfig, GraphState, JsonState,
        NodeFunc, Reducer, ReducerType, RemainingSteps, RuntimeContext, START, SendCommand,
        StateSchema, StateUpdate, StepResult, StreamEvent,
    };

    // Re-export kernel StateGraph trait (implemented by `StateGraphImpl` below)
    pub use mofa_kernel::workflow::StateGraph;

    // Foundation layer implementations
    pub use mofa_foundation::workflow::{
        // Reducers
        AppendReducer,
        // StateGraph implementation
        CompiledGraphImpl,
        CustomReducer,
        ExtendReducer,
        FirstReducer,
        LastNReducer,
        LastReducer,
        MergeReducer,
        OverwriteReducer,
        StateGraphImpl,
        create_reducer,
    };

    // Legacy workflow API (original WorkflowGraph-based builder/executor)
    pub use mofa_foundation::workflow::{
        ExecutionEvent, ExecutorConfig, WorkflowBuilder, WorkflowExecutor, WorkflowGraph,
        WorkflowNode, WorkflowValue,
    };

    // DSL support (workflow definitions via `WorkflowDslParser`)
    pub use mofa_foundation::workflow::dsl::{
        AgentRef, DslError, DslResult, EdgeDefinition, LlmAgentConfig, LoopConditionDef,
        NodeConfigDef, NodeDefinition, RetryPolicy, TaskExecutorDef, TimeoutConfig, TransformDef,
        WorkflowConfig, WorkflowDefinition, WorkflowDslParser, WorkflowMetadata,
    };
}
421
422// =============================================================================
423// Prelude - Commonly Used Imports
424// =============================================================================
425
426/// Commonly used types for quick start
pub mod prelude {
    //! One-line import of the most common types: `use mofa_sdk::prelude::*;`

    // Core agent types from the kernel layer
    pub use crate::kernel::{
        AgentCapabilities, AgentCapabilitiesBuilder, AgentContext, AgentError, AgentInput,
        AgentMetadata, AgentOutput, AgentResult, AgentState, MoFAAgent,
    };
    // Execution entry points from the runtime layer
    pub use crate::runtime::{AgentBuilder, AgentRunner, SimpleRuntime, run_agents};
    // Re-export of the `async_trait` attribute macro used by agent impls
    pub use async_trait::async_trait;
}
435
// Re-export dashboard module (only available with monitoring feature)
#[cfg(feature = "monitoring")]
pub mod dashboard {
    //! Monitoring dashboard facade; full re-export of `mofa_monitoring`.
    pub use mofa_monitoring::*;
}
441
// Rhai scripting helpers (explicit module)
pub mod rhai {
    //! Rhai scripting helpers; full re-export of `mofa_extra::rhai`.
    pub use mofa_extra::rhai::*;
}
446
// Private helper module backing `llm::ToolPluginExecutor`.
mod llm_tools;
448
449// Re-export LLM module from mofa-foundation (always available)
pub mod llm {
    //! LLM (Large Language Model) integration module
    //!
    //! Provides LLM interaction capabilities for agents.
    //!
    //! # Quick Start
    //!
    //! ```rust,ignore
    //! use mofa_sdk::llm::{LLMProvider, LLMClient, ChatMessage, ChatCompletionRequest};
    //!
    //! // Implement your LLM provider
    //! struct MyProvider { /* ... */ }
    //!
    //! #[async_trait::async_trait]
    //! impl LLMProvider for MyProvider {
    //!     fn name(&self) -> &str { "my-llm" }
    //!     async fn chat(&self, request: ChatCompletionRequest) -> LLMResult<ChatCompletionResponse> {
    //!         // Your implementation
    //!     }
    //! }
    //!
    //! // Use the client
    //! let client = LLMClient::new(Arc::new(MyProvider::new()));
    //! let answer = client.ask("What is Rust?").await?;
    //! ```

    // Tool-executor bridge implemented in the crate-private `llm_tools` module
    pub use crate::llm_tools::ToolPluginExecutor;
    pub use mofa_foundation::llm::anthropic::{AnthropicConfig, AnthropicProvider};
    pub use mofa_foundation::llm::google::{GeminiConfig, GeminiProvider};
    pub use mofa_foundation::llm::ollama::{OllamaConfig, OllamaProvider};
    pub use mofa_foundation::llm::openai::{OpenAIConfig, OpenAIProvider};
    pub use mofa_foundation::llm::*;

    /// Create an OpenAI provider from environment variables.
    ///
    /// Reads the following environment variables:
    /// - `OPENAI_API_KEY`: the API key (required)
    /// - `OPENAI_BASE_URL`: optional API base URL
    /// - `OPENAI_MODEL`: optional default model
    ///
    /// # Errors
    ///
    /// Returns `LLMError::ConfigError` when `OPENAI_API_KEY` is not set.
    ///
    /// # Examples
    ///
    /// ```rust,ignore
    /// use mofa_sdk::llm::openai_from_env;
    ///
    /// let provider = openai_from_env().unwrap();
    /// ```
    pub fn openai_from_env() -> Result<OpenAIProvider, crate::llm::LLMError> {
        let api_key = std::env::var("OPENAI_API_KEY").map_err(|_| {
            crate::llm::LLMError::ConfigError(
                "OpenAI API key not found in environment variable OPENAI_API_KEY".to_string(),
            )
        })?;

        let mut config = OpenAIConfig::new(api_key);

        // Optional overrides; defaults from OpenAIConfig apply when unset.
        if let Ok(base_url) = std::env::var("OPENAI_BASE_URL") {
            config = config.with_base_url(&base_url);
        }

        if let Ok(model) = std::env::var("OPENAI_MODEL") {
            config = config.with_model(&model);
        }

        Ok(OpenAIProvider::with_config(config))
    }

    /// Create an Ollama provider from environment variables (no API key required).
    ///
    /// Reads:
    /// - `OLLAMA_BASE_URL`: base URL without `/v1` suffix, e.g. `http://localhost:11434` (optional)
    /// - `OLLAMA_MODEL`: model name, e.g. `llama3` (optional)
    pub fn ollama_from_env() -> Result<OllamaProvider, crate::llm::LLMError> {
        Ok(crate::llm::OllamaProvider::from_env())
    }
}
526
527/// 从环境变量创建 Anthropic 提供器
528///
529/// 读取环境变量:
530/// - ANTHROPIC_API_KEY (必需)
531/// - ANTHROPIC_BASE_URL (可选)
532/// - ANTHROPIC_MODEL (可选)
533pub fn anthropic_from_env() -> Result<crate::llm::AnthropicProvider, crate::llm::LLMError> {
534 let api_key = std::env::var("ANTHROPIC_API_KEY").map_err(|_| {
535 crate::llm::LLMError::ConfigError(
536 "Anthropic API key not found in ANTHROPIC_API_KEY".to_string(),
537 )
538 })?;
539
540 let mut cfg = crate::llm::AnthropicConfig::new(api_key);
541 if let Ok(base_url) = std::env::var("ANTHROPIC_BASE_URL") {
542 cfg = cfg.with_base_url(base_url);
543 }
544 if let Ok(model) = std::env::var("ANTHROPIC_MODEL") {
545 cfg = cfg.with_model(model);
546 }
547
548 Ok(crate::llm::AnthropicProvider::with_config(cfg))
549}
550
551/// 从环境变量创建 Google Gemini 提供器
552///
553/// 读取环境变量:
554/// - GEMINI_API_KEY (必需)
555/// - GEMINI_BASE_URL (可选)
556/// - GEMINI_MODEL (可选)
557pub fn gemini_from_env() -> Result<crate::llm::GeminiProvider, crate::llm::LLMError> {
558 let api_key = std::env::var("GEMINI_API_KEY").map_err(|_| {
559 crate::llm::LLMError::ConfigError("Gemini API key not found in GEMINI_API_KEY".to_string())
560 })?;
561
562 let mut cfg = crate::llm::GeminiConfig::new(api_key);
563 if let Ok(base_url) = std::env::var("GEMINI_BASE_URL") {
564 cfg = cfg.with_base_url(base_url);
565 }
566 if let Ok(model) = std::env::var("GEMINI_MODEL") {
567 cfg = cfg.with_model(model);
568 }
569
570 Ok(crate::llm::GeminiProvider::with_config(cfg))
571}
572
573// Re-export Secretary module from mofa-foundation (always available)
pub mod secretary {
    //! Secretary agent pattern - an event-loop driven intelligent assistant
    //!
    //! The secretary agent is a user-facing assistant that performs
    //! personal-assistant work by interacting with an LLM. It is designed to be
    //! paired with a long-lived connection for continuous, interactive service.
    //!
    //! ## Work loop (5-phase event loop)
    //!
    //! 1. **Receive ideas** → record them and generate TODOs
    //! 2. **Clarify requirements** → interact with the user, convert into project documents
    //! 3. **Dispatch & assign** → invoke the matching executor agents
    //! 4. **Monitor & feed back** → push key decisions to the human
    //! 5. **Accept & report** → update TODOs, produce reports
    //!
    //! # Quick Start
    //!
    //! ```rust,ignore
    //! use mofa_sdk::secretary::{
    //!     AgentInfo, DefaultSecretaryBuilder, ChannelConnection, DefaultInput,
    //!     SecretaryOutput, TodoPriority,
    //! };
    //! use std::sync::Arc;
    //!
    //! #[tokio::main]
    //! async fn main() -> anyhow::Result<()> {
    //!     // 1. Create the secretary agent
    //!     let mut backend_agent = AgentInfo::new("backend_agent", "Backend Agent");
    //!     backend_agent.capabilities = vec!["backend".to_string()];
    //!     backend_agent.current_load = 0;
    //!     backend_agent.available = true;
    //!     backend_agent.performance_score = 0.9;
    //!
    //!     let secretary = DefaultSecretaryBuilder::new()
    //!         .with_id("my_secretary")
    //!         .with_name("Project Secretary")
    //!         .with_auto_clarify(true)
    //!         .with_executor(backend_agent)
    //!         .build()
    //!         .await;
    //!
    //!     // 2. Create a channel connection
    //!     let (conn, input_tx, mut output_rx) = ChannelConnection::new_pair(32);
    //!
    //!     // 3. Start the event loop
    //!     let handle = secretary.start(conn).await;
    //!
    //!     // 4. Send user input
    //!     input_tx.send(DefaultInput::Idea {
    //!         content: "Build a REST API".to_string(),
    //!         priority: Some(TodoPriority::High),
    //!         metadata: None,
    //!     }).await?;
    //!
    //!     // 5. Handle secretary output
    //!     while let Some(output) = output_rx.recv().await {
    //!         match output {
    //!             SecretaryOutput::Acknowledgment { message } => {
    //!                 info!("Secretary: {}", message);
    //!             }
    //!             SecretaryOutput::DecisionRequired { decision } => {
    //!                 info!("Decision required: {}", decision.description);
    //!                 // handle the decision...
    //!             }
    //!             SecretaryOutput::Report { report } => {
    //!                 info!("Report: {}", report.content);
    //!             }
    //!             _ => {}
    //!         }
    //!     }
    //!
    //!     handle.await??;
    //!     Ok(())
    //! }
    //! ```
    //!
    //! # Custom LLM provider
    //!
    //! ```rust,ignore
    //! use mofa_sdk::secretary::{LLMProvider, ChatMessage};
    //! use std::sync::Arc;
    //!
    //! struct MyLLMProvider {
    //!     api_key: String,
    //! }
    //!
    //! #[async_trait::async_trait]
    //! impl LLMProvider for MyLLMProvider {
    //!     fn name(&self) -> &str { "my-llm" }
    //!
    //!     async fn chat(&self, messages: Vec<ChatMessage>) -> anyhow::Result<String> {
    //!         // Call your LLM API
    //!         Ok("LLM response".to_string())
    //!     }
    //! }
    //!
    //! // Use the custom LLM
    //! let llm = Arc::new(MyLLMProvider { api_key: "...".to_string() });
    //! let secretary = DefaultSecretaryBuilder::new()
    //!     .with_llm(llm)
    //!     .build()
    //!     .await;
    //! ```

    pub use mofa_foundation::secretary::*;
}
679
680// Re-export React module from mofa-foundation (always available)
pub mod react {
    //! ReAct (Reasoning + Acting) framework
    //!
    //! ReAct is an agent architecture that combines reasoning with acting.
    //! Agents solve problems through a "think - act - observe" loop.

    pub use mofa_foundation::react::*;
}
689
690// Re-export collaboration module from mofa-foundation (always available)
pub mod collaboration {
    //! Adaptive collaboration protocol module
    //!
    //! Provides standard protocol implementations for adaptive multi-agent
    //! collaboration, with dynamic switching of collaboration modes based on
    //! the task description.
    //!
    //! # Standard protocols
    //!
    //! - `RequestResponseProtocol`: request/response mode, suited to data-processing tasks
    //! - `PublishSubscribeProtocol`: publish/subscribe mode, suited to creative-generation tasks
    //! - `ConsensusProtocol`: consensus mode, suited to decision-making tasks
    //! - `DebateProtocol`: debate mode, suited to review tasks
    //! - `ParallelProtocol`: parallel mode, suited to analysis tasks
    //!
    //! # Quick Start
    //!
    //! ```rust,ignore
    //! use mofa_sdk::collaboration::{
    //!     RequestResponseProtocol, PublishSubscribeProtocol, ConsensusProtocol,
    //!     LLMDrivenCollaborationManager,
    //! };
    //! use std::sync::Arc;
    //!
    //! #[tokio::main]
    //! async fn main() -> anyhow::Result<()> {
    //!     let manager = LLMDrivenCollaborationManager::new("agent_001");
    //!
    //!     // Register the standard protocols
    //!     manager.register_protocol(Arc::new(RequestResponseProtocol::new("agent_001"))).await?;
    //!     manager.register_protocol(Arc::new(PublishSubscribeProtocol::new("agent_001"))).await?;
    //!     manager.register_protocol(Arc::new(ConsensusProtocol::new("agent_001"))).await?;
    //!
    //!     // Execute a task (described in natural language; the system picks a protocol)
    //!     let result = manager.execute_task(
    //!         "Process the data: [1, 2, 3]", // task description
    //!         serde_json::json!({"data": [1, 2, 3]})
    //!     ).await?;
    //!
    //!     println!("Result: {:?}", result);
    //!     Ok(())
    //! }
    //! ```

    pub use mofa_foundation::collaboration::*;
}
735
736// =============================================================================
737// Persistence module (re-export from mofa-foundation)
738// =============================================================================
739
740// Re-export Persistence module from mofa-foundation
741pub mod persistence {
742 pub use mofa_foundation::persistence::*;
743
744 /// 快速创建带 PostgreSQL 持久化的 LLM Agent
745 ///
746 /// 自动处理:
747 /// - 数据库连接(从 DATABASE_URL)
748 /// - OpenAI Provider(从 OPENAI_API_KEY)
749 /// - 持久化插件
750 /// - 自动生成 user_id、tenant_id、agent_id 和 session_id
751 ///
752 /// # 环境变量
753 /// - DATABASE_URL: PostgreSQL 连接字符串
754 /// - OPENAI_API_KEY: OpenAI API 密钥
755 /// - USER_ID: 用户 ID(可选)
756 /// - TENANT_ID: 租户 ID(可选)
757 /// - AGENT_ID: Agent ID(可选)
758 /// - SESSION_ID: 会话 ID(可选)
759 ///
760 /// # 示例
761 ///
762 /// ```rust,ignore
763 /// use mofa_sdk::persistence::quick_agent_with_postgres;
764 ///
765 /// #[tokio::main]
766 /// async fn main() -> mofa_sdk::llm::LLMResult<()> {
767 /// let agent = quick_agent_with_postgres("你是一个有用的助手")
768 /// .await?
769 /// .with_name("聊天助手")
770 /// .build_async()
771 /// .await;
772 /// Ok(())
773 /// }
774 /// ```
775 #[cfg(all(feature = "persistence-postgres"))]
776 pub async fn quick_agent_with_postgres(
777 system_prompt: &str,
778 ) -> Result<crate::llm::LLMAgentBuilder, crate::llm::LLMError> {
779 use std::sync::Arc;
780
781 // 1. 初始化数据库
782 let store_arc = PostgresStore::from_env().await.map_err(|e| {
783 crate::llm::LLMError::Other(format!("数据库连接失败: {}", e.to_string()))
784 })?;
785
786 // 2. 从环境变量获取或生成 IDs
787 let user_id = std::env::var("USER_ID")
788 .ok()
789 .and_then(|s| uuid::Uuid::parse_str(&s).ok())
790 .unwrap_or_else(uuid::Uuid::now_v7);
791
792 let tenant_id = std::env::var("TENANT_ID")
793 .ok()
794 .and_then(|s| uuid::Uuid::parse_str(&s).ok())
795 .unwrap_or_else(uuid::Uuid::now_v7);
796
797 let agent_id = std::env::var("AGENT_ID")
798 .ok()
799 .and_then(|s| uuid::Uuid::parse_str(&s).ok())
800 .unwrap_or_else(uuid::Uuid::now_v7);
801
802 let session_id = std::env::var("SESSION_ID")
803 .ok()
804 .and_then(|s| uuid::Uuid::parse_str(&s).ok())
805 .unwrap_or_else(uuid::Uuid::now_v7);
806
807 // 3. 创建持久化插件(直接使用 Arc<PostgresStore> 作为存储)
808 let plugin = PersistencePlugin::new(
809 "persistence-plugin",
810 store_arc.clone(),
811 store_arc,
812 user_id,
813 tenant_id,
814 agent_id,
815 session_id,
816 );
817
818 // 4. 返回预配置的 builder
819 Ok(crate::llm::LLMAgentBuilder::from_env()?
820 .with_system_prompt(system_prompt)
821 .with_plugin(plugin))
822 }
823
824 /// 快速创建带内存持久化的 LLM Agent
825 ///
826 /// 使用内存存储,适合测试和开发环境。
827 ///
828 /// # 环境变量
829 /// - OPENAI_API_KEY: OpenAI API 密钥
830 ///
831 /// # 示例
832 ///
833 /// ```rust,ignore
834 /// use mofa_sdk::persistence::quick_agent_with_memory;
835 ///
836 /// #[tokio::main]
837 /// async fn main() -> mofa_sdk::llm::LLMResult<()> {
838 /// let agent = quick_agent_with_memory("你是一个有用的助手")
839 /// .await?
840 /// .with_name("聊天助手")
841 /// .build_async()
842 /// .await;
843 /// Ok(())
844 /// }
845 /// ```
846 pub async fn quick_agent_with_memory(
847 system_prompt: &str,
848 ) -> Result<crate::llm::LLMAgentBuilder, crate::llm::LLMError> {
849 let store = InMemoryStore::new();
850
851 // 生成 IDs
852 let user_id = uuid::Uuid::now_v7();
853 let tenant_id = uuid::Uuid::now_v7();
854 let agent_id = uuid::Uuid::now_v7();
855 let session_id = uuid::Uuid::now_v7();
856
857 let plugin = PersistencePlugin::from_store(
858 "persistence-plugin",
859 store,
860 user_id,
861 tenant_id,
862 agent_id,
863 session_id,
864 );
865
866 Ok(crate::llm::LLMAgentBuilder::from_env()?
867 .with_system_prompt(system_prompt)
868 .with_plugin(plugin))
869 }
870}
871
872// =============================================================================
873// Messaging module (re-export from mofa-foundation)
874// =============================================================================
875
876// Re-export Messaging module from mofa-foundation
pub mod messaging {
    //! Generic message bus framework for decoupled agent architectures
    //!
    //! Full re-export of `mofa_foundation::messaging`. Provides:
    //! - Generic message types with pub/sub patterns
    //! - Inbound/outbound message separation
    //! - Trait-based message contracts
    //!
    //! # Quick Start
    //!
    //! ```rust,ignore
    //! use mofa_sdk::messaging::{MessageBus, SimpleInboundMessage, SimpleOutboundMessage};
    //!
    //! let bus = MessageBus::new(100);
    //!
    //! // Subscribe to inbound messages
    //! let mut rx = bus.subscribe_inbound();
    //!
    //! // Publish a message
    //! let msg = SimpleInboundMessage::new("telegram", "user", "chat", "Hello");
    //! bus.publish_inbound(msg).await?;
    //! ```

    pub use mofa_foundation::messaging::*;
}
902
903// =============================================================================
904// Dora-rs runtime support (enabled with `dora` feature)
905// =============================================================================
906
#[cfg(feature = "dora")]
pub mod dora {
    //! Dora-rs adapter for distributed dataflow runtime
    //!
    //! Available only when the `dora` feature is enabled.
    //!
    //! This module provides MoFA framework integration with dora-rs, including:
    //! - DoraNode wrapper: Agent lifecycle management
    //! - DoraOperator wrapper: Plugin capability abstraction
    //! - DoraDataflow wrapper: Multi-agent collaborative dataflow
    //! - DoraChannel wrapper: Cross-agent communication channel
    //! - DoraRuntime wrapper: Complete runtime support (embedded/distributed)
    //!
    //! # Example
    //!
    //! ```rust,ignore
    //! use mofa_sdk::dora::{DoraRuntime, RuntimeConfig, run_dataflow};
    //!
    //! #[tokio::main]
    //! async fn main() -> eyre::Result<()> {
    //!     // Quick run with helper function
    //!     let result = run_dataflow("dataflow.yml").await?;
    //!     info!("Dataflow {} completed", result.uuid);
    //!
    //!     // Or use the builder pattern
    //!     let mut runtime = DoraRuntime::embedded("dataflow.yml");
    //!     let result = runtime.run().await?;
    //!     Ok(())
    //! }
    //! ```

    // Re-export dora adapter types
    pub use mofa_runtime::dora_adapter::*;

    // Re-export dora-specific runtime types from mofa_runtime root
    pub use mofa_runtime::{AgentBuilder, AgentRuntime, MoFARuntime};
}
942
943// =============================================================================
944// Agent Skills - Progressive Disclosure Skills System
945// =============================================================================
946
947// Module declaration for skills (public)
948pub mod skills;
949
950// Public skills module with re-exports