//! The client gateway bridge is essential support for the [`client`] module.
//!
//! This is made available for direct use if one wishes to work at a lower
//! level or avoid the higher-level functionality of the [`Client`].
//!
//! Of interest are three pieces:
//!
//! ### [`ShardManager`]
//!
//! The shard manager is responsible for being a clean interface between the
//! user and the shard runners, providing essential functions such as
//! [`ShardManager::shutdown`] to shut down a shard and [`ShardManager::restart`]
//! to restart a shard.
//!
//! If you are using the `Client`, this is likely the only piece of interest to
//! you. Refer to [its documentation][`ShardManager`] for more information.
//!
//! ### [`ShardQueuer`]
//!
//! The shard queuer is a light wrapper around an mpsc receiver that receives
//! [`ShardQueuerMessage`]s. It should be run in its own thread so it can
//! receive messages to start shards in a queue.
//!
//! Refer to [its documentation][`ShardQueuer`] for more information.
//!
//! ### [`ShardRunner`]
//!
//! The shard runner is responsible for actually running a shard and
//! communicating with its respective WebSocket client.
//!
//! It performs all actions, such as sending a presence update over the client,
//! and, with the help of the [`Shard`], determines what to do: for example,
//! whether to reconnect, resume, or identify with the gateway.
//!
//! ### In Conclusion
//!
//! For almost every - if not every - use case, the [`ShardManager`] is the
//! only piece of this module you may need to be concerned with.
//!
//! [`Client`]: ../../struct.Client.html
//! [`client`]: ../..
//! [`Shard`]: ../../../gateway/struct.Shard.html
//! [`ShardManager`]: struct.ShardManager.html
//! [`ShardManager::restart`]: struct.ShardManager.html#method.restart
//! [`ShardManager::shutdown`]: struct.ShardManager.html#method.shutdown
//! [`ShardQueuer`]: struct.ShardQueuer.html
//! [`ShardQueuerMessage`]: enum.ShardQueuerMessage.html
//! [`ShardRunner`]: struct.ShardRunner.html

pub mod event;

mod shard_manager;
mod shard_manager_monitor;
mod shard_messenger;
mod shard_queuer;
mod shard_runner;
mod shard_runner_message;

pub use self::shard_manager::{ShardManager, ShardManagerOptions};
pub use self::shard_manager_monitor::ShardManagerMonitor;
pub use self::shard_messenger::ShardMessenger;
pub use self::shard_queuer::ShardQueuer;
pub use self::shard_runner::{ShardRunner, ShardRunnerOptions};
pub use self::shard_runner_message::ShardRunnerMessage;

use std::{
    fmt::{Display, Formatter, Result as FmtResult},
    sync::mpsc::Sender,
    time::Duration as StdDuration,
};
use ::gateway::{ConnectionStage, InterMessage};

/// A message either for a [`ShardManager`] or a [`ShardRunner`].
///
/// [`ShardManager`]: struct.ShardManager.html
/// [`ShardRunner`]: struct.ShardRunner.html
#[derive(Clone, Debug)]
pub enum ShardClientMessage {
    /// A message intended to be worked with by a [`ShardManager`].
    ///
    /// [`ShardManager`]: struct.ShardManager.html
    Manager(ShardManagerMessage),
    /// A message intended to be worked with by a [`ShardRunner`].
    ///
    /// [`ShardRunner`]: struct.ShardRunner.html
    Runner(ShardRunnerMessage),
}
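
// A minimal illustrative sketch, not part of the module itself: the enums in
// this file compose so that a manager-bound request, such as restarting a
// shard, is wrapped in a `ShardClientMessage` before being routed to the
// component that handles it. The helper below exists purely as an example.
#[allow(dead_code)]
fn example_restart_message(shard_id: ShardId) -> ShardClientMessage {
    // Wrap the manager-level restart request in the client-level envelope.
    ShardClientMessage::Manager(ShardManagerMessage::Restart(shard_id))
}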

/// A message for a [`ShardManager`] relating to an operation with a shard.
///
/// [`ShardManager`]: struct.ShardManager.html
#[derive(Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub enum ShardManagerMessage {
    /// Indicator that a [`ShardManagerMonitor`] should restart a shard.
    ///
    /// [`ShardManagerMonitor`]: struct.ShardManagerMonitor.html
    Restart(ShardId),
    /// An update from a shard runner.
    ShardUpdate {
        id: ShardId,
        latency: Option<StdDuration>,
        stage: ConnectionStage,
    },
    /// Indicator that a [`ShardManagerMonitor`] should fully shut down a shard
    /// without bringing it back up.
    ///
    /// [`ShardManagerMonitor`]: struct.ShardManagerMonitor.html
    Shutdown(ShardId),
    /// Indicator that a [`ShardManagerMonitor`] should fully shut down all
    /// shards and end its monitoring process for the [`ShardManager`].
    ///
    /// [`ShardManager`]: struct.ShardManager.html
    /// [`ShardManagerMonitor`]: struct.ShardManagerMonitor.html
    ShutdownAll,
    /// Indicator that a [`ShardManager`] has initiated a shutdown, and that
    /// the component receiving this should also shut down with no further
    /// action taken.
    ShutdownInitiated,
}

/// A message to be sent to the [`ShardQueuer`].
///
/// [`ShardQueuer`]: struct.ShardQueuer.html
#[derive(Clone, Debug)]
pub enum ShardQueuerMessage {
    /// Message to start a shard, where the first element is the ID of the
    /// shard to start and the second element is the total number of shards in
    /// use.
    Start(ShardId, ShardId),
    /// Message to shut down the shard queuer.
    Shutdown,
}

/// A light tuple struct wrapper around a u64 to verify type correctness when
/// working with the IDs of shards.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct ShardId(pub u64);

impl Display for ShardId {
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        write!(f, "{}", self.0)
    }
}

/// Information about a [`ShardRunner`].
///
/// The [`ShardId`] is not included because, as it stands, you probably already
/// know the ID if you obtained this.
///
/// [`ShardId`]: struct.ShardId.html
/// [`ShardRunner`]: struct.ShardRunner.html
#[derive(Debug)]
pub struct ShardRunnerInfo {
    /// The latency between when a heartbeat was sent and when the
    /// acknowledgement was received.
    pub latency: Option<StdDuration>,
    /// The channel used to communicate with the shard runner, telling it
    /// what to do with regard to its status.
    pub runner_tx: Sender<InterMessage>,
    /// The current connection stage of the shard.
    pub stage: ConnectionStage,
}
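
// A small, illustrative test module sketched here (not taken from the
// upstream source) to demonstrate the two simplest pieces above: `ShardId`'s
// `Display` output and the meaning of the two elements in
// `ShardQueuerMessage::Start`.
#[cfg(test)]
mod tests {
    use super::{ShardId, ShardQueuerMessage};

    #[test]
    fn shard_id_displays_inner_value() {
        // `ShardId`'s `Display` impl writes the wrapped u64 directly.
        assert_eq!(ShardId(3).to_string(), "3");
    }

    #[test]
    fn start_message_carries_id_and_total() {
        // In `Start(ShardId, ShardId)`, the first element is the shard to
        // start and the second is the total number of shards in use.
        match ShardQueuerMessage::Start(ShardId(3), ShardId(5)) {
            ShardQueuerMessage::Start(id, total) => {
                assert_eq!(id, ShardId(3));
                assert_eq!(total, ShardId(5));
            },
            ShardQueuerMessage::Shutdown => unreachable!("expected Start"),
        }
    }
}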