// swarm_engine/lib.rs
//! # SwarmEngine
//!
//! High-throughput, low-latency Agent Swarm orchestration framework.
//!
//! SwarmEngine provides a framework for orchestrating multiple AI agents working
//! together on complex tasks. It supports various LLM backends and includes
//! built-in evaluation capabilities.
//!
//! ## Quick Start
//!
//! ```rust,ignore
//! use swarm_engine::prelude::*;
//!
//! // Create an environment and orchestrator
//! let config = SwarmConfig::default();
//! let orchestrator = OrchestratorBuilder::new(config)
//!     .environment(env)
//!     .manager(manager)
//!     .build()?;
//!
//! // Run the swarm
//! orchestrator.run().await;
//! ```
//!
//! ## Feature Flags
//!
//! - `eval` - Enable evaluation framework
//! - `ollama` - Enable Ollama backend
//! - `llama-server` - Enable llama.cpp server backend
//! - `llama-cpp` - Enable llama.cpp native backend
//! - `cuda` - Enable CUDA support
//! - `metal` - Enable Metal support (Apple Silicon)
//! - `full` - Enable eval + all HTTP-based LLM backends
35// Re-export core types
36pub use swarm_engine_core::*;
37
38// Re-export LLM types
39pub use swarm_engine_llm as llm;
40
41// Re-export eval types when enabled
42#[cfg(feature = "eval")]
43pub use swarm_engine_eval as eval;
44
45/// Prelude module for convenient imports
46pub mod prelude {
47    // Re-export core prelude
48    pub use swarm_engine_core::prelude::*;
49
50    // LLM types
51    pub use swarm_engine_llm::{
52        LlamaCppServerConfig, LlamaCppServerDecider, LlmBatchInvoker, LlmDecider, LlmDeciderConfig,
53        LoraConfig, OllamaConfig, OllamaDecider,
54    };
55
56    // Eval types (when enabled)
57    #[cfg(feature = "eval")]
58    pub use swarm_engine_eval::prelude::*;
59}