pub fn train_parallel<T, E>(
    agent: &mut T,
    environment: &E,
    config: &TrainParallelConfig,
    rng_env: &mut Prng,
    rng_agent: &mut Prng,
    logger: &mut dyn StatsLogger
) where
    E: EnvStructure + Environment<Observation = <E::ObservationSpace as Space>::Element, Action = <E::ActionSpace as Space>::Element, Feedback = <E::FeedbackSpace as Space>::Element> + Sync + ?Sized,
    T: Agent<<E::ObservationSpace as Space>::Element, <E::ActionSpace as Space>::Element> + BatchUpdate<<E::ObservationSpace as Space>::Element, <E::ActionSpace as Space>::Element, Feedback = <E::FeedbackSpace as Space>::Element> + ?Sized,
    T::Actor: Send,
    T::HistoryBuffer: Send,
    E::ObservationSpace: LogElementSpace,
    E::ActionSpace: LogElementSpace,
    <E::FeedbackSpace as Space>::Element: Feedback,
    <<E::FeedbackSpace as Space>::Element as Feedback>::StepSummary: Send,
    <<E::FeedbackSpace as Space>::Element as Feedback>::EpisodeSummary: Send

Train a batch learning agent in parallel across several threads.

The logger is used by the main thread for agent updates as well as by one of the worker threads for action and step logs.