Skip to main content

Crate ollama_oxide

Crate ollama_oxide 

Source
Expand description

§ollama-oxide

A Rust library for integrating with Ollama’s native API.

§Quick Start

§Async Example

use ollama_oxide::{OllamaClient, OllamaApiAsync, Result};

#[tokio::main]
async fn main() -> Result<()> {
    let client = OllamaClient::default()?;
    let version = client.version().await?;
    println!("Ollama version: {}", version.version);
    Ok(())
}

§Sync Example

use ollama_oxide::{OllamaClient, OllamaApiSync, Result};

fn main() -> Result<()> {
    let client = OllamaClient::default()?;
    let version = client.version_blocking()?;
    println!("Ollama version: {}", version.version);
    Ok(())
}

Re-exports§

pub use inference::ChatMessage;
pub use inference::ChatRequest;
pub use inference::ChatResponse;
pub use inference::ChatRole;
pub use inference::EmbedInput;
pub use inference::EmbedRequest;
pub use inference::EmbedResponse;
pub use inference::FormatSetting;
pub use inference::GenerateRequest;
pub use inference::GenerateResponse;
pub use inference::KeepAliveSetting;
pub use inference::Logprob;
pub use inference::ModelOptions;
pub use inference::ResponseMessage;
pub use inference::StopSetting;
pub use inference::ThinkSetting;
pub use inference::TokenLogprob;
pub use inference::VersionResponse;
pub use http::ChatStream;
pub use http::ChatStreamBlocking;
pub use http::ClientConfig;
pub use http::OllamaApiAsync;
pub use http::OllamaApiSync;
pub use http::OllamaClient;

Modules§

http
HTTP client for Ollama API
inference
Inference types for Ollama API responses and requests
prelude
Convenience re-exports of commonly used items (per the usual Rust prelude convention — confirm against the module's contents)
Enums§

Error
Error type for all ollama-oxide operations

Type Aliases§

Result
Result type alias for ollama-oxide operations