# boostr 0.1.0
#
# ML framework built on numr - attention, quantization, model architectures
# Documentation: https://docs.rs/boostr
[package]
name = "boostr"
version = "0.1.0"
# Keys below follow Cargo convention: alphabetical after name/version,
# with `description` last.
categories = ["science", "mathematics"]
documentation = "https://docs.rs/boostr"
edition = "2024"
keywords = [
  "machine-learning",
  "deep-learning",
  "transformer",
  "inference",
  "quantization",
]
license = "Apache-2.0"
repository = "https://github.com/ml-rust/boostr"
# MSRV — edition 2024 requires rustc >= 1.85, so keep these in sync.
rust-version = "1.85"
description = "ML framework built on numr - attention, quantization, model architectures"

[dependencies]
# Sorted alphabetically per Cargo convention. Optional dependencies are
# activated via `dep:` entries in [features] below.

# Error context for the distributed transport path (feature: distributed).
anyhow = { version = "1", optional = true }
# Plain-old-data casts; shared by the `wgpu` and `distributed` features.
bytemuck = { version = "1.24", features = ["derive"], optional = true }
# CUDA backend (feature: cuda).
cudarc = { version = "0.19", optional = true, features = [
  "cuda-version-from-build-system",
] }
half = { version = "2", features = ["bytemuck"] }
image = { version = "0.25", default-features = false, features = [
  "png",
  "jpeg",
] }
memmap2 = "0.9"
# Distributed inference transport (feature: distributed).
nexar = { version = "0.1", optional = true }
# Core numerical backend; backend features below pass through to it.
numr = "0.5"
rayon = "1.11"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
# NOTE(review): serde_yaml 0.9 is archived/unmaintained upstream — consider
# migrating to a maintained YAML crate; left unchanged here to preserve behavior.
serde_yaml = "0.9"
# Plain string form; the single-key inline table `{ version = "0.9" }` was redundant.
splintr = "0.9"
thiserror = "2.0"
tracing = "0.1"
# WebGPU backend (feature: wgpu).
wgpu = { version = "28.0", optional = true }

[dev-dependencies]
# Sorted alphabetically per Cargo convention (rustls was previously out of order).
# Non-optional here: tests always need byte-level casts regardless of features.
bytemuck = { version = "1.24", features = ["derive"] }
rustls = "0.23"
tempfile = "3"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }

[features]
# Only the CPU backend is built by default; GPU backends are opt-in.
default = ["cpu"]

# Backend features (passed through to numr)
# `cpu` is a marker feature: it enables no dependencies of its own.
cpu = []
cuda = ["numr/cuda", "dep:cudarc"]
# NOTE(review): nccl does not pull in `cuda` here — presumably callers enable
# both together; confirm whether nccl should imply cuda.
nccl = ["numr/nccl"]
wgpu = ["numr/wgpu", "dep:wgpu", "dep:bytemuck"]

# Distributed inference
# Shares `dep:bytemuck` with the wgpu backend; enabling either activates it.
distributed = ["dep:nexar", "dep:anyhow", "dep:bytemuck"]

# Precision features
f16 = ["numr/f16"]
fp8 = ["numr/fp8"]

[package.metadata.docs.rs]
# Build docs.rs documentation without default features — presumably so the
# build does not depend on any backend toolchain (GPU backends would not
# build there). NOTE(review): with no features enabled, feature-gated items
# will be missing from the rendered docs; confirm whether `features = ["cpu"]`
# or `--cfg docsrs` with selected features is intended instead.
no-default-features = true