llm_stack_ollama/lib.rs
//! Ollama provider for the llm-stack SDK.
//!
//! This crate implements [`Provider`](llm_stack::Provider) for Ollama's
//! Chat API, supporting both non-streaming and streaming generation
//! with tool calling.
//!
//! Ollama runs locally and requires no authentication by default.
//!
//! # Quick start
//!
//! ```rust,no_run
//! use llm_stack_ollama::{OllamaConfig, OllamaProvider};
//! use llm_stack::{ChatMessage, ChatParams, Provider};
//!
//! # async fn example() -> Result<(), llm_stack::LlmError> {
//! let provider = OllamaProvider::new(OllamaConfig::default());
//!
//! let params = ChatParams {
//!     messages: vec![ChatMessage::user("Hello!")],
//!     ..Default::default()
//! };
//!
//! let response = provider.generate(&params).await?;
//! println!("{}", response.text().unwrap_or("no text"));
//! # Ok(())
//! # }
//! ```
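//!
//! # Streaming
//!
//! The crate also supports streamed generation. The sketch below is a rough
//! illustration only: it assumes the [`Provider`](llm_stack::Provider) trait
//! exposes a `generate_stream` method that yields response chunks as a
//! `futures` `Stream`; check the `llm_stack` documentation for the actual
//! method name and chunk type.
//!
//! ```rust,ignore
//! use futures::StreamExt;
//! use llm_stack::{ChatMessage, ChatParams, Provider};
//! use llm_stack_ollama::{OllamaConfig, OllamaProvider};
//!
//! # async fn example() -> Result<(), llm_stack::LlmError> {
//! let provider = OllamaProvider::new(OllamaConfig::default());
//! let params = ChatParams {
//!     messages: vec![ChatMessage::user("Tell me a short story.")],
//!     ..Default::default()
//! };
//!
//! // `generate_stream` is an assumed name; the real trait method may differ.
//! let mut stream = provider.generate_stream(&params).await?;
//! while let Some(chunk) = stream.next().await {
//!     // Each chunk is assumed to expose the newly generated text.
//!     print!("{}", chunk?.text().unwrap_or(""));
//! }
//! # Ok(())
//! # }
//! ```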

#![warn(missing_docs)]

mod config;
mod convert;
mod factory;
mod provider;
mod stream;
mod types;

pub use config::OllamaConfig;
pub use factory::{OllamaFactory, register_global};
pub use provider::OllamaProvider;