// hoosh/lib.rs
//! Hoosh — AI inference gateway for Rust.
//!
//! Multi-provider LLM routing, local model serving, speech-to-text, and
//! token budget management. OpenAI-compatible HTTP API.
//!
//! > **Name**: Hoosh (Persian: هوش) — intelligence, the word for AI.
//!
//! # Architecture
//!
//! ```text
//! Clients (tarang, daimon, agnoshi, consumer apps)
//!        │
//!        ▼
//! Router (provider selection, load balancing, fallback)
//!        │
//!        ├──▶ Local backends (Ollama, llama.cpp, Synapse, whisper.cpp)
//!        │
//!        └──▶ Remote APIs (OpenAI, Anthropic, DeepSeek, Mistral, Groq, ...)
//!        │
//!        ▼
//! Cache ◀── Rate Limiter ◀── Token Budget
//! ```
//!
//! # Quick start
//!
//! ```rust,no_run
//! use hoosh::{InferenceRequest, HooshClient};
//!
//! # async fn example() -> anyhow::Result<()> {
//! let client = HooshClient::new("http://localhost:8088");
//! let response = client.infer(&InferenceRequest {
//!     model: "llama3".into(),
//!     prompt: "Explain Rust ownership in one sentence.".into(),
//!     ..Default::default()
//! }).await?;
//! println!("{}", response.text);
//! # Ok(())
//! # }
//! ```
40
// Public modules, kept in alphabetical order. `hardware` and `telemetry`
// are feature-gated and compile only when the corresponding Cargo feature
// is enabled.

pub mod audit;
pub mod budget; // token budgets and pools (`TokenBudget`, `TokenPool`)
pub mod cache; // response caching (`ResponseCache`)
pub mod client; // async HTTP client for a running gateway (`HooshClient`)
pub mod config;
pub mod cost;
pub mod error; // crate-wide error type (`HooshError`)
pub mod events;
#[cfg(feature = "hwaccel")]
pub mod hardware; // behind the `hwaccel` feature
pub mod health;
pub mod inference; // inference request/response and model-info types
pub mod metrics;
pub mod middleware;
pub mod provider; // LLM provider abstraction and registry
pub mod queue;
pub mod router; // provider selection, load balancing, fallback
pub mod server;
#[cfg(feature = "otel")]
pub mod telemetry; // behind the `otel` feature (OpenTelemetry, presumably — see module docs)
61
// Convenience re-exports of the most commonly used types, so callers can
// write e.g. `hoosh::HooshClient` instead of `hoosh::client::HooshClient`.
pub use budget::{TokenBudget, TokenPool};
pub use cache::ResponseCache;
pub use client::HooshClient;
pub use error::HooshError;
pub use inference::{InferenceRequest, InferenceResponse, ModelInfo};
pub use provider::{LlmProvider, ProviderRegistry, ProviderType};
pub use router::Router;
69
// Test module (`tests.rs` or `tests/mod.rs`), compiled only for `cargo test`.
#[cfg(test)]
mod tests;