[package]
name = "relayrl_algorithms"
version = "0.2.0"
edition = "2024"
description = "A collection of Multi-Agent Deep Reinforcement Learning Algorithms (IPPO, MAPPO, etc.)"
repository = "https://github.com/jrcalgo/relayrl"
documentation = "https://docs.rs/relayrl_algorithms"
keywords = ["rl-algorithms", "multi-agent", "deep-learning", "ai", "training"]
categories = ["science", "algorithms", "simulation", "concurrency", "mathematics"]
license = "Apache-2.0"

[features]
default = ["ndarray-backend"]
ndarray-backend = ["burn-ndarray", "relayrl_types/ndarray-backend"]
tch-backend = ["burn-tch", "relayrl_types/tch-backend"]

[dependencies]
relayrl_types = { workspace = true }
bytemuck = { workspace = true }
burn-core = { workspace = true }
burn-tensor = { workspace = true }
burn-optim = { workspace = true }
burn-nn = { workspace = true }
burn-autodiff = { workspace = true }
burn-ndarray = { workspace = true, optional = true }
burn-tch = { workspace = true, optional = true }
dashmap = { workspace = true }
ndarray = { workspace = true }
rand = { workspace = true }
tokio = { workspace = true }
thiserror = { workspace = true }
log = { workspace = true }
async-trait = { workspace = true }