//! This module contains `Actor` trait and the runtime to execute it.
//!
//! ### Limiting task types
//!
//! If you want to limit the types that can be used as `LiteTask`s, you can
//! add a trait that requires `LiteTask` and implement `TaskEliminated`
//! for your trait only. The compiler will then refuse to spawn any task
//! that doesn't implement your trait.
//!
//! Example:
//!
//! ```
//! # use anyhow::Error;
//! # use async_trait::async_trait;
//! # use meio::{Actor, Context, IdOf, TaskEliminated, TaskError, LiteTask};
//! trait SpecificTask: LiteTask {}
//!
//! struct MyActor {}
//!
//! impl Actor for MyActor {
//!     type GroupBy = ();
//! }
//!
//! #[async_trait]
//! impl<T: SpecificTask> TaskEliminated<T, ()> for MyActor {
//!     async fn handle(
//!         &mut self,
//!         id: IdOf<T>,
//!         tag: (),
//!         result: Result<T::Output, TaskError>,
//!         ctx: &mut Context<Self>,
//!     ) -> Result<(), Error> {
//!         Ok(())
//!     }
//! }
//! ```

use crate::compat::watch;
use crate::forwarders::StreamForwarder;
use crate::handlers::{
    Consumer, Eliminated, Envelope, Interaction, InteractionDone, InteractionTask, InterruptedBy,
    Operation, Parcel, StartedBy, TaskEliminated,
};
use crate::ids::{Id, IdOf};
use crate::lifecycle::{Awake, Done, LifecycleNotifier, LifetimeTracker};
use crate::linkage::Address;
use crate::lite_runtime::{self, LiteTask, Tag, TaskAddress};
use anyhow::Error;
use async_trait::async_trait;
use futures::stream::{pending, FusedStream};
use futures::{select_biased, FutureExt, Stream, StreamExt};
use std::hash::Hash;
use thiserror::Error;
use tokio::sync::mpsc;
use uuid::Uuid;

#[derive(Debug, Error)]
enum Reason {
    #[error("Actor is terminating...")]
    Terminating,
}

const MESSAGES_CHANNEL_DEPTH: usize = 32;

/// The main trait. Your structs have to implement it to be
/// compatible with the `ActorRuntime` and `Address` system.
///
/// **Recommended** for implementing reactive activities.
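///
/// A minimal sketch of an implementation (the name `MyActor` is illustrative,
/// not part of this crate):
///
/// ```
/// use meio::Actor;
///
/// struct MyActor;
///
/// impl Actor for MyActor {
///     // No grouping of child actors or tasks is needed for this actor.
///     type GroupBy = ();
/// }
/// ```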
#[async_trait]
pub trait Actor: Sized + Send + 'static {
    /// Specifies how to group child actors.
    type GroupBy: Clone + Send + Eq + Hash;

    /// Returns the unique name of the `Actor`.
    /// Uses a `Uuid` by default.
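    ///
    /// A sketch of overriding it with a fixed name (useful for stable,
    /// human-readable logs; the fragment is marked `ignore`):
    ///
    /// ```ignore
    /// fn name(&self) -> String {
    ///     "my-singleton-actor".into()
    /// }
    /// ```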
    fn name(&self) -> String {
        let uuid = Uuid::new_v4();
        format!("Actor:{}({})", std::any::type_name::<Self>(), uuid)
    }

    #[doc(hidden)] // Not ready yet
    /// Called when the `Action` queue is drained (no more messages will be sent).
    async fn queue_drained(&mut self, _ctx: &mut Context<Self>) -> Result<(), Error> {
        Ok(())
    }

    #[doc(hidden)] // Not ready yet
    /// Called when the `InstantAction` queue is drained (no more messages will be sent).
    async fn instant_queue_drained(&mut self, _ctx: &mut Context<Self>) -> Result<(), Error> {
        Ok(())
    }
}

/// Status of the task.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum Status {
    /// Task is alive and working.
    Alive,
    /// Task has finished.
    Stop,
}

impl Status {
    /// Returns `true` if the task has finished.
    pub fn is_done(&self) -> bool {
        *self == Status::Stop
    }
}

/// Spawns an `Actor` in an `ActorRuntime`.
// TODO: No `Option`! Use `static Address<System>` instead.
// It can be possible when `Controller` and `Operator` will be removed.
pub(crate) fn spawn<A, S>(
    actor: A,
    supervisor: Option<Address<S>>,
    custom_name: Option<String>,
) -> Address<A>
where
    A: Actor + StartedBy<S>,
    S: Actor + Eliminated<A>,
{
    let actor_name = custom_name.unwrap_or_else(|| actor.name());
    let id = Id::new(actor_name);
    let (hp_msg_tx, hp_msg_rx) = mpsc::unbounded_channel();
    let (msg_tx, msg_rx) = mpsc::channel(MESSAGES_CHANNEL_DEPTH);
    let (join_tx, join_rx) = watch::channel(Status::Alive);
    let address = Address::new(id, hp_msg_tx, msg_tx, join_rx);
    let id: Id = address.id().into();
    // The `Awake` message is wrapped in an `Envelope` here because it will be processed
    // at startup and never sent through the channel, which prevents other messages from
    // being handled before `Awake`.
    let awake_envelope = Envelope::instant(Awake::new());
    let done_notifier = {
        match supervisor {
            None => <dyn LifecycleNotifier<_>>::ignore(),
            Some(super_addr) => {
                let op = Operation::Done { id };
                <dyn LifecycleNotifier<_>>::once(super_addr, op)
            }
        }
    };
    let context = Context {
        alive: true,
        address: address.clone(),
        lifetime_tracker: LifetimeTracker::new(),
        //terminator: Terminator::new(id.clone()),
    };
    let runtime = ActorRuntime {
        id: address.id(),
        actor,
        context,
        awake_envelope: Some(awake_envelope),
        done_notifier,
        msg_rx,
        hp_msg_rx,
        join_tx,
    };
    crate::compat::spawn_async(runtime.entrypoint());
    address
}

/// `Context` of an `ActorRuntime` that contains the `Address` of the `Actor` and its `LifetimeTracker`.
pub struct Context<A: Actor> {
    alive: bool,
    address: Address<A>,
    lifetime_tracker: LifetimeTracker<A>,
    //terminator: Terminator,
}

impl<A: Actor> Context<A> {
    /// Returns an instance of the `Address`.
    pub fn address(&mut self) -> &mut Address<A> {
        &mut self.address
    }

    /// Starts and binds an `Actor`.
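    ///
    /// A sketch of typical usage from a handler (assumes `Child` implements
    /// `StartedBy<Self>` and `InterruptedBy<Self>`, `Self` implements
    /// `Eliminated<Child>`, and the group value `()` is illustrative):
    ///
    /// ```ignore
    /// let child: Address<Child> = ctx.spawn_actor(Child::new(), ());
    /// ```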
    pub fn spawn_actor<T>(&mut self, actor: T, group: A::GroupBy) -> Address<T>
    where
        T: Actor + StartedBy<A> + InterruptedBy<A>,
        A: Eliminated<T>,
    {
        let custom_name = None;
        let address = spawn(actor, Some(self.address.clone()), custom_name);
        self.lifetime_tracker.insert(address.clone(), group);
        address
    }

    /// Starts and binds a `Task`.
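    ///
    /// A sketch of typical usage (assumes `Fetcher` implements `LiteTask`,
    /// `Self` implements `TaskEliminated<Fetcher, ()>`, and the tag and group
    /// values are illustrative):
    ///
    /// ```ignore
    /// let task_address = ctx.spawn_task(Fetcher::new(url), (), ());
    /// ```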
    pub fn spawn_task<T, M>(&mut self, task: T, tag: M, group: A::GroupBy) -> TaskAddress<T>
    where
        T: LiteTask,
        A: TaskEliminated<T, M>,
        M: Tag,
    {
        let custom_name = None;
        let stopper = lite_runtime::spawn(task, tag, Some(self.address.clone()), custom_name);
        self.lifetime_tracker.insert_task(stopper.clone(), group);
        stopper
    }

    /// Attaches a `Stream` to the `Actor`: spawns a `StreamForwarder` task that
    /// delivers the stream's items to the `Consumer` handler of this `Actor`.
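    ///
    /// A sketch of typical usage (assumes `Self` implements `Consumer` for the
    /// stream's item type; `events_stream` is illustrative):
    ///
    /// ```ignore
    /// ctx.attach(events_stream, (), ());
    /// ```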
    pub fn attach<S, M>(&mut self, stream: S, tag: M, group: A::GroupBy)
    where
        S: Stream + Unpin + Send + 'static,
        S::Item: Send,
        A: Consumer<S::Item>,
        M: Tag,
    {
        let forwarder = StreamForwarder::new(stream, self.address.clone());
        self.spawn_task(forwarder, tag, group);
    }

    /// Spawns an `InteractionTask` as a `LiteTask` and awaits the result as an `Action`
    /// that will call the `InteractionDone` handler.
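    ///
    /// A sketch of typical usage (assumes `Self` implements
    /// `InteractionDone<GetInfo, ()>` for a hypothetical `GetInfo` interaction and
    /// that `task` was obtained from the target's `Address`):
    ///
    /// ```ignore
    /// ctx.track_interaction(task, (), ());
    /// ```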
    pub fn track_interaction<I, M>(&mut self, task: InteractionTask<I>, tag: M, group: A::GroupBy)
    where
        I: Interaction,
        A: InteractionDone<I, M>,
        M: Tag,
    {
        self.spawn_task(task, tag, group);
    }

    /// Interrupts an `Actor`.
    pub fn interrupt<T>(&mut self, address: &mut Address<T>) -> Result<(), Error>
    where
        T: Actor + InterruptedBy<A>,
    {
        address.interrupt_by()
    }

    /// Returns an `Error` if the `Actor` is terminating.
    /// Useful for checks inside handlers.
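    ///
    /// A sketch of a guard at the top of a handler:
    ///
    /// ```ignore
    /// ctx.not_terminating()?; // skip new work while the actor is shutting down
    /// ```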
    pub fn not_terminating(&self) -> Result<(), Error> {
        if self.is_terminating() {
            Err(Reason::Terminating.into())
        } else {
            Ok(())
        }
    }

    /// Stops the runtime of the `Actor`. No more messages will be processed after this call.
    ///
    /// The recommended way to terminate an `Actor` is the `shutdown` method.
    ///
    /// > Attention! The termination process is not started by this call, so all spawned
    /// > actors and tasks will be orphaned.
    pub fn stop(&mut self) {
        self.alive = false;
    }

    /// Starts graceful termination of the `Actor`.
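    ///
    /// A common pattern is to call it from an `InterruptedBy` handler (a sketch;
    /// `Supervisor` is an illustrative supervisor type):
    ///
    /// ```ignore
    /// #[async_trait]
    /// impl InterruptedBy<Supervisor> for MyActor {
    ///     async fn handle(&mut self, ctx: &mut Context<Self>) -> Result<(), Error> {
    ///         ctx.shutdown();
    ///         Ok(())
    ///     }
    /// }
    /// ```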
    pub fn shutdown(&mut self) {
        self.lifetime_tracker.start_termination();
        if self.lifetime_tracker.is_finished() {
            self.stop();
        }
    }

    // TODO: Maybe provide a reference to the `LifetimeTracker`
    // instead of recalling methods below:

    /// Sends an interruption signal to a specific group of actors and tasks.
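    ///
    /// A sketch (assumes a custom `Group` enum used as `GroupBy`):
    ///
    /// ```ignore
    /// ctx.terminate_group(Group::Workers);
    /// ```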
    pub fn terminate_group(&mut self, group: A::GroupBy) {
        self.lifetime_tracker.terminate_group(group)
    }

    /// Returns true if the shutdown process is in progress.
    pub fn is_terminating(&self) -> bool {
        self.lifetime_tracker.is_terminating()
    }

    /// Sets the termination sequence: during shutdown, groups are terminated in the given order.
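    ///
    /// A sketch (assumes a custom `Group` enum used as `GroupBy`; groups listed
    /// earlier are terminated before groups listed later):
    ///
    /// ```ignore
    /// ctx.termination_sequence(vec![Group::Workers, Group::Storage]);
    /// ```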
    pub fn termination_sequence(&mut self, sequence: Vec<A::GroupBy>) {
        self.lifetime_tracker.termination_sequence(sequence);
    }
}

/// `ActorRuntime` for `Actor`.
pub struct ActorRuntime<A: Actor> {
    id: IdOf<A>,
    actor: A,
    context: Context<A>,
    awake_envelope: Option<Envelope<A>>,
    done_notifier: Box<dyn LifecycleNotifier<Done<A>>>,
    /// `Receiver` that has to be used to receive incoming messages.
    msg_rx: mpsc::Receiver<Envelope<A>>,
    /// High-priority receiver
    hp_msg_rx: mpsc::UnboundedReceiver<Parcel<A>>,
    /// Sends a signal when the `Actor` has completely stopped.
    join_tx: watch::Sender<Status>,
}

impl<A: Actor> ActorRuntime<A> {
    /// The `entrypoint` of the `ActorRuntime` that calls the `routine` method.
    async fn entrypoint(mut self) {
        log::info!("Actor started: {:?}", self.id);
        let mut awake_envelope = self
            .awake_envelope
            .take()
            .expect("awake envelope has to be set in spawn method!");
        let awake_res = awake_envelope
            .handle(&mut self.actor, &mut self.context)
            .await;
        match awake_res {
            Ok(_) => {
                self.routine().await;
            }
            Err(err) => {
                log::error!(
                    "Can't call awake notification handler of the actor {:?}: {}",
                    self.id,
                    err
                );
            }
        }
        log::info!("Actor finished: {:?}", self.id);
        let done_event = Done::new(self.id.clone());
        if let Err(err) = self.done_notifier.notify(done_event) {
            log::error!(
                "Can't send done notification from the actor {:?}: {}",
                self.id,
                err
            );
        }
        if !self.join_tx.is_closed() {
            if let Err(_err) = self.join_tx.send(Status::Stop) {
                log::error!("Can't release joiners of {:?}", self.id);
            }
        }
    }

    async fn routine(&mut self) {
        let mut scheduled_queue = crate::compat::DelayQueue::<Envelope<A>>::new().fuse();
        let mut pendel = pending();
        while self.context.alive {
            // This is a workaround to avoid polling the `DelayQueue` when it has no items:
            // the queue is resumable, but it returns `None`, which closes the `FusedStream`,
            // and after that no future scheduled event would ever be delivered.
            // TODO: Create a `tokio_util` PR to make `DelayQueue` unstoppable (always pending).
            // Awaiting: https://github.com/tokio-rs/tokio/issues/3407
            let maybe_queue: &mut (dyn FusedStream<Item = Result<_, _>> + Unpin + Send) =
                if scheduled_queue.get_ref().len() > 0 {
                    &mut scheduled_queue
                } else {
                    &mut pendel
                };
            select_biased! {
                hp_envelope = self.hp_msg_rx.recv().fuse() => {
                    if let Some(hp_env) = hp_envelope {
                        let envelope = hp_env.envelope;
                        let process_envelope;
                        match hp_env.operation {
                            Operation::Forward => {
                                process_envelope = Some(envelope);
                            }
                            Operation::Done { id } => {
                                self.context.lifetime_tracker.remove(&id);
                                if self.context.lifetime_tracker.is_finished() {
                                    self.context.stop();
                                }
                                process_envelope = Some(envelope);
                            }
                            Operation::Schedule { deadline } => {
                                scheduled_queue.get_mut().insert_at(envelope, deadline);
                                log::trace!("Scheduled events: {}", scheduled_queue.get_ref().len());
                                process_envelope = None;
                            }
                        }
                        if let Some(mut envelope) = process_envelope {
                            let handle_res = envelope.handle(&mut self.actor, &mut self.context).await;
                            if let Err(err) = handle_res {
                                log::error!("Handler for {:?} (high-priority) failed: {}", self.id, err);
                            }
                        }
                    } else {
                        // Even if all `Address`es are dropped, the `Actor` may still be doing
                        // something useful in the background, so don't terminate actors without
                        // `Address`es while they still have controllers.
                        // Background tasks = something spawned that the `Actor` waits to finish.
                        log::trace!("Messages stream of {:?} (high-priority) drained.", self.id);
                        if let Err(err) = self.actor.instant_queue_drained(&mut self.context).await {
                            log::error!("Queue (high-priority) drained handler {:?} failed: {}", self.id, err);
                        }
                    }
                }
                opt_delayed_envelope = maybe_queue.next() => {
                    if let Some(delayed_envelope) = opt_delayed_envelope {
                        match delayed_envelope {
                            Ok(expired) => {
                                log::trace!("Execute scheduled event. Remained: {}", scheduled_queue.get_ref().len());
                                let mut envelope = expired.into_inner();
                                let handle_res = envelope.handle(&mut self.actor, &mut self.context).await;
                                if let Err(err) = handle_res {
                                    log::error!("Handler for {:?} (scheduled) failed: {}", self.id, err);
                                }
                            }
                            Err(err) => {
                                log::error!("Failed scheduled execution for {:?}: {}", self.id, err);
                            }
                        }
                    } else {
                        log::error!("Delay queue of {} closed.", self.id);
                        if let Err(err) = self.actor.instant_queue_drained(&mut self.context).await {
                            log::error!("Queue (high-priority) drained handler {:?} failed: {}", self.id, err);
                        }
                    }
                }
                lp_envelope = self.msg_rx.recv().fuse() => {
                    if let Some(mut envelope) = lp_envelope {
                        let handle_res = envelope.handle(&mut self.actor, &mut self.context).await;
                        if let Err(err) = handle_res {
                            log::error!("Handler for {:?} failed: {}", self.id, err);
                        }
                    } else {
                        // Even if all `Address`es are dropped, the `Actor` may still be doing
                        // something useful in the background, so don't terminate actors without
                        // `Address`es while they still have controllers.
                        // Background tasks = something spawned that the `Actor` waits to finish.
                        log::trace!("Messages stream of {:?} drained.", self.id);
                        if let Err(err) = self.actor.queue_drained(&mut self.context).await {
                            log::error!("Queue drained handler {:?} failed: {}", self.id, err);
                        }
                    }
                }
            }
            /*
            let inspection_res = self.actor.inspection(&mut self.context).await;
            if let Err(err) = inspection_res {
                log::error!("Inspection of {:?} failed: {}", self.id, err);
            }
            */
        }
    }
}