// border_core/base.rs

//! Core interfaces and traits for reinforcement learning.
//!
//! This module provides the fundamental building blocks for reinforcement learning systems,
//! including interfaces for environments, agents, policies, and experience replay.
//! These interfaces define the core abstractions that enable the implementation of
//! various reinforcement learning algorithms.
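//!
//! # Examples
//!
//! A minimal sketch of how the `Obs` and `Act` abstractions below are implemented
//! for a custom environment, assuming the crate re-exports them at its root.
//! `MyObs` and `MyAct` are hypothetical types used only for illustration:
//!
//! ```ignore
//! use border_core::{Act, Obs};
//!
//! #[derive(Clone, Debug)]
//! struct MyObs {
//!     position: f32,
//! }
//!
//! impl Obs for MyObs {
//!     fn len(&self) -> usize {
//!         1 // single (non-vectorized) environment
//!     }
//! }
//!
//! #[derive(Clone, Debug)]
//! struct MyAct {
//!     force: f32,
//! }
//!
//! // `Act` only requires `Clone + Debug`; `len` has a default implementation.
//! impl Act for MyAct {}
//! ```
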
mod agent;
mod batch;
mod env;
mod policy;
mod replay_buffer;
mod step;

use std::fmt::Debug;

pub use agent::Agent;
pub use batch::TransitionBatch;
pub use env::Env;
pub use policy::{Configurable, Policy};
pub use replay_buffer::{ExperienceBufferBase, NullReplayBuffer, ReplayBufferBase};
pub use step::{Info, Step, StepProcessor};

/// A trait representing observations from an environment.
///
/// This trait defines the interface for observations in reinforcement learning.
/// Observations represent the state of the environment as perceived by the agent.
///
/// # Requirements
///
/// Implementations must:
/// - Be cloneable for efficient copying
/// - Support debug formatting for logging and debugging
/// - Provide a method to determine the number of observations
///
/// # Note
///
/// While the interface supports vectorized environments through the `len` method,
/// the current implementation only supports single environments. Therefore,
/// `len()` is expected to return 1 in all cases.
///
/// # Examples
///
/// ```ignore
/// #[derive(Clone, Debug)]
/// struct SimpleObservation {
///     position: f32,
///     velocity: f32,
/// }
///
/// impl Obs for SimpleObservation {
///     fn len(&self) -> usize {
///         1 // single observation
///     }
/// }
/// ```
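///
/// Code generic over `Obs` can rely only on the `Clone`, `Debug`, and `len`
/// guarantees. A minimal sketch (the helper function is hypothetical):
///
/// ```ignore
/// fn log_obs<O: Obs>(obs: &O) {
///     debug_assert_eq!(obs.len(), 1); // vectorized environments are not supported
///     println!("{:?}", obs);
/// }
/// ```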
pub trait Obs: Clone + Debug {
    /// Returns the number of observations in the object.
    ///
    /// # Returns
    ///
    /// The number of observations. Currently, this should always return 1,
    /// as vectorized environments are not supported.
    fn len(&self) -> usize;
}

/// A trait representing actions that can be taken in an environment.
///
/// This trait defines the interface for actions in reinforcement learning.
/// Actions represent the decisions made by the agent that affect the environment.
///
/// # Requirements
///
/// Implementations must:
/// - Be cloneable for efficient copying
/// - Support debug formatting for logging and debugging
///
/// # Examples
///
/// ```ignore
/// #[derive(Clone, Debug)]
/// struct DiscreteAction {
///     action: usize,
///     num_actions: usize, // size of the action space
/// }
///
/// impl Act for DiscreteAction {
///     fn len(&self) -> usize {
///         1 // a single action, mirroring `Obs::len`
///     }
/// }
/// ```
pub trait Act: Clone + Debug {
    /// Returns the number of actions in the object.
    ///
    /// # Note
    ///
    /// The default implementation simply panics via `unimplemented!()`. This
    /// method may be removed in future versions, as it is not used in the
    /// current implementation.
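    ///
    /// # Examples
    ///
    /// A minimal sketch showing that calling the default implementation panics
    /// (`NoOpAction` is a hypothetical type):
    ///
    /// ```ignore
    /// #[derive(Clone, Debug)]
    /// struct NoOpAction;
    ///
    /// impl Act for NoOpAction {} // relies on the default `len`
    ///
    /// let a = NoOpAction;
    /// a.len(); // panics with `unimplemented!()`
    /// ```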
    fn len(&self) -> usize {
        unimplemented!();
    }
}