// npc_engine_core/domain.rs

1/*
2 *  SPDX-License-Identifier: Apache-2.0 OR MIT
3 *  © 2020-2022 ETH Zurich and other contributors, see AUTHORS.txt for details
4 */
5
6use std::collections::{BTreeMap, BTreeSet};
7use std::hash::Hash;
8
9use ordered_float::NotNan;
10use rand_chacha::ChaCha8Rng;
11
12use crate::{AgentId, Behavior, Edges, IdleTask, MCTSConfiguration, Node, StateDiffRef, Task};
13
/// The "current" value an agent has in a given state.
///
/// Wrapped in [`NotNan`] (from `ordered_float`) so that values are totally
/// ordered (`Ord`/`Eq`) and NaN is ruled out at construction time, which lets
/// the planner compare and rank agent values safely.
pub type AgentValue = NotNan<f32>;
16
17/// A domain on which the MCTS planner can plan.
18pub trait Domain: Sized + 'static {
19    /// The state the MCTS plans on.
20    type State: std::fmt::Debug + Sized;
21    /// A compact set of changes towards a `State` that are accumulated throughout planning.
22    type Diff: std::fmt::Debug + Default + Clone + Hash + Eq;
23    /// A representation of a display action that can be fetched from a task.
24    /// We need Default trait for creating the DisplayAction for the idle placeholder task.
25    type DisplayAction: std::fmt::Debug + Default;
26
27    /// Returns all behaviors available for this domain.
28    fn list_behaviors() -> &'static [&'static dyn Behavior<Self>];
29
30    /// Gets the current value of the given agent in the given tick and world state.
31    fn get_current_value(tick: u64, state_diff: StateDiffRef<Self>, agent: AgentId) -> AgentValue;
32
33    /// Updates the list of agents which are in the horizon of the given agent in the given tick and world state.
34    fn update_visible_agents(
35        start_tick: u64,
36        tick: u64,
37        state_diff: StateDiffRef<Self>,
38        agent: AgentId,
39        agents: &mut BTreeSet<AgentId>,
40    );
41
42    /// Gets all possible valid tasks for a given agent in a given tick and world state.
43    fn get_tasks(
44        tick: u64,
45        state_diff: StateDiffRef<'_, Self>,
46        agent: AgentId,
47    ) -> Vec<Box<dyn Task<Self>>> {
48        let mut actions = Vec::new();
49        Self::list_behaviors()
50            .iter()
51            .for_each(|behavior| behavior.add_tasks(tick, state_diff, agent, &mut actions));
52
53        actions.dedup();
54        actions
55    }
56
57    /// Gets a textual description of the given world state.
58    /// This will be used by the graph tool to show in each node, and the log tool to dump the state.
59    fn get_state_description(_state_diff: StateDiffRef<Self>) -> String {
60        String::new()
61    }
62
63    /// Gets the new agents present in a diff but not in a state.
64    fn get_new_agents(_state_diff: StateDiffRef<Self>) -> Vec<AgentId> {
65        vec![]
66    }
67
68    /// Gets the display actions for idle task.
69    fn display_action_task_idle() -> Self::DisplayAction {
70        Default::default()
71    }
72
73    /// Gets the display actions for planning task.
74    fn display_action_task_planning() -> Self::DisplayAction {
75        Default::default()
76    }
77}
78
/// An estimator of state-value function.
///
/// The bound `Send` allows implementors to be moved across threads.
pub trait StateValueEstimator<D: Domain>: Send {
    /// Takes the state of an explored node and returns the estimated expected (discounted) values.
    ///
    /// The estimate is one `f32` value per agent, keyed by [`AgentId`].
    ///
    /// Returns None if the passed node has no unexpanded edge.
    #[allow(clippy::too_many_arguments)]
    fn estimate(
        &mut self,
        rnd: &mut ChaCha8Rng,           // seedable PRNG; estimates can be made reproducible
        config: &MCTSConfiguration,     // planner configuration in effect
        initial_state: &D::State,       // presumably the root state planning started from — confirm against caller
        start_tick: u64,                // tick at which planning started
        node: &Node<D>,                 // the node whose value is being estimated
        edges: &Edges<D>,               // edges associated with `node`
        depth: u32,                     // presumably the node's depth in the search tree — confirm against caller
    ) -> Option<BTreeMap<AgentId, f32>>;
}
96
97/// Domains who want to use planning tasks must implement this.
98pub trait DomainWithPlanningTask: Domain {
99    /// A fallback task, in case, during planning, the world evolved in a different direction than what the MCTS tree explored.
100    fn fallback_task(_agent: AgentId) -> Box<dyn Task<Self>> {
101        Box::new(IdleTask)
102    }
103}