pub fn train_async<A, E, R, S, P>(
    model_dir: &P,
    agent_config: &A::Config,
    agent_configs: &Vec<A::Config>,
    env_config_train: &E::Config,
    env_config_eval: &E::Config,
    step_proc_config: &S::Config,
    replay_buffer_config: &R::Config,
    actor_man_config: &ActorManagerConfig,
    async_trainer_config: &AsyncTrainerConfig
) where
    A: Agent<E, R> + SyncModel,
    E: Env,
    R: ReplayBufferBase<PushedItem = S::Output> + Send + 'static,
    S: StepProcessorBase<E>,
    A::Config: Send + 'static,
    E::Config: Send + 'static,
    S::Config: Send + 'static,
    R::PushedItem: Send + 'static,
    A::ModelInfo: Send + 'static,
    P: AsRef<Path>,
## Description

Runs asynchronous training.

This function runs ActorManager and AsyncTrainer on threads. These communicate using crossbeam_channel. Training logs are recorded for tensorboard.

  • model_dir - Directory where trained models and TensorBoard logs will be saved.
  • agent_config - Configuration of the agent to be trained.
  • agent_configs - Configurations of agents for asynchronous sampling. These must share the same model structure (SyncModel::ModelInfo), while exploration parameters may differ between them.
  • env_config_train - Configuration of the environment with which transitions are sampled.
  • env_config_eval - Configuration of the environment on which the agent being trained is evaluated.
  • step_proc_config - Configuration of the step processor, which converts environment steps into the items pushed to the replay buffer (per the bound R::PushedItem = S::Output).
  • replay_buffer_config - Configuration of the replay buffer.
  • actor_man_config - Configuration of ActorManager.
  • async_trainer_config - Configuration of AsyncTrainer.