1use crate::event::Event;
20use crate::player::PlayerInfo;
21use async_trait::async_trait;
22use chrono::{DateTime, Utc};
23use std::cmp::Reverse;
24use std::collections::{HashMap, VecDeque, vec_deque};
25use std::sync::{Arc, RwLock, RwLockReadGuard};
26use std::time::Duration;
27use thiserror::Error;
28use tokio::sync::broadcast;
29use tokio::sync::broadcast::error::RecvError;
30use tokio::{select, time};
31#[cfg(feature = "tokio-graceful-shutdown")]
32use tokio_graceful_shutdown::{FutureExt, IntoSubsystem, SubsystemHandle};
33use tracing::{debug, error, warn};
34use uuid::Uuid;
35
/// A recorded "bloop": associates a shared player handle with the client
/// that recorded it and the time of recording.
#[derive(Debug, Clone)]
pub struct Bloop<Player> {
    // Shared, lock-guarded handle to the player this bloop refers to.
    player: Arc<RwLock<Player>>,

    /// Id of the player, captured once at construction (see `Bloop::new`).
    pub player_id: Uuid,

    /// Identifier of the client that recorded this bloop.
    pub client_id: String,

    /// Timestamp at which the bloop was recorded.
    pub recorded_at: DateTime<Utc>,
}
57
58impl<Player: PlayerInfo> Bloop<Player> {
59 pub fn new(
62 player: Arc<RwLock<Player>>,
63 client_id: impl Into<String>,
64 recorded_at: DateTime<Utc>,
65 ) -> Self {
66 let player_id = player.read().unwrap().id();
67
68 Bloop {
69 player_id,
70 player,
71 client_id: client_id.into(),
72 recorded_at,
73 }
74 }
75}
76
impl<Player> Bloop<Player> {
    /// Returns a read guard on the associated player.
    ///
    /// Panics if the underlying `RwLock` has been poisoned.
    pub fn player(&self) -> RwLockReadGuard<'_, Player> {
        self.player.read().unwrap()
    }
}
86
87#[inline]
89pub fn bloops_since<Player>(since: DateTime<Utc>) -> impl Fn(&&Arc<Bloop<Player>>) -> bool {
90 move |bloop| bloop.recorded_at >= since
91}
92
93#[inline]
95pub fn bloops_for_player<Player>(player_id: Uuid) -> impl Fn(&&Arc<Bloop<Player>>) -> bool {
96 move |bloop| bloop.player_id == player_id
97}
98
/// A time-bounded list of shared bloops, kept newest-first.
#[derive(Debug)]
pub struct BloopCollection<Player> {
    // Newest entries at the front; expired entries pruned from the back on add.
    bloops: VecDeque<Arc<Bloop<Player>>>,
    // Maximum age an entry may reach before being pruned on insertion.
    max_age: Duration,
}
107
108impl<Player> BloopCollection<Player> {
109 pub fn new(max_age: Duration) -> Self {
111 Self {
112 bloops: VecDeque::new(),
113 max_age,
114 }
115 }
116
117 pub fn with_bloops(max_age: Duration, mut bloops: Vec<Arc<Bloop<Player>>>) -> Self {
119 bloops.sort_by_key(|bloop| Reverse(bloop.recorded_at));
120
121 let mut collection = Self::new(max_age);
122 collection.bloops.extend(bloops);
123
124 collection
125 }
126
127 pub fn add(&mut self, bloop: Arc<Bloop<Player>>) {
129 let threshold = Utc::now() - self.max_age;
130
131 while let Some(oldest) = self.bloops.back() {
132 if oldest.recorded_at < threshold {
133 self.bloops.pop_back();
134 } else {
135 break;
136 }
137 }
138
139 self.bloops.push_front(bloop);
140 }
141
142 pub fn iter(&self) -> impl Iterator<Item = &Arc<Bloop<Player>>> {
144 self.bloops.iter()
145 }
146}
147
// Allows `for bloop in &collection { … }` without an explicit `.iter()` call.
impl<'a, Player: 'a> IntoIterator for &'a BloopCollection<Player> {
    type Item = &'a Arc<Bloop<Player>>;
    type IntoIter = vec_deque::Iter<'a, Arc<Bloop<Player>>>;

    fn into_iter(self) -> Self::IntoIter {
        self.bloops.iter()
    }
}
156
/// Indexes bloops both globally and per recording client.
#[derive(Debug)]
pub struct BloopProvider<Player> {
    // All bloops across every client.
    global: BloopCollection<Player>,
    // One collection per client id, created lazily on first add.
    per_client: HashMap<String, BloopCollection<Player>>,
    // Retention window applied to every collection created by this provider.
    max_age: Duration,
    // Shared empty collection returned for unknown client ids (see `for_client`).
    empty_collection: BloopCollection<Player>,
}
165
166impl<Player> BloopProvider<Player> {
167 pub fn new(max_age: Duration) -> Self {
169 Self {
170 global: BloopCollection::new(max_age),
171 per_client: HashMap::new(),
172 max_age,
173 empty_collection: BloopCollection::new(max_age),
174 }
175 }
176
177 pub fn with_bloops(max_age: Duration, bloops: Vec<Bloop<Player>>) -> Self {
179 let bloops: Vec<Arc<Bloop<Player>>> = bloops.into_iter().map(Arc::new).collect();
180 let global_collection = BloopCollection::with_bloops(max_age, bloops.clone());
181
182 let mut per_client: HashMap<String, BloopCollection<Player>> = HashMap::new();
183 let mut client_groups: HashMap<String, Vec<Arc<Bloop<Player>>>> = HashMap::new();
184
185 for bloop in bloops {
186 client_groups
187 .entry(bloop.client_id.clone())
188 .or_default()
189 .push(bloop.clone());
190 }
191
192 for (client_id, client_bloops) in client_groups {
193 let collection = BloopCollection::with_bloops(max_age, client_bloops);
194 per_client.insert(client_id, collection);
195 }
196
197 Self {
198 global: global_collection,
199 per_client,
200 max_age,
201 empty_collection: BloopCollection::new(max_age),
202 }
203 }
204
205 pub fn add(&mut self, bloop: Arc<Bloop<Player>>) {
207 self.global.add(bloop.clone());
208
209 let client_collection = self
210 .per_client
211 .entry(bloop.client_id.clone())
212 .or_insert_with(|| BloopCollection::new(self.max_age));
213
214 client_collection.add(bloop.clone());
215 }
216
217 pub fn global(&self) -> &BloopCollection<Player> {
219 &self.global
220 }
221
222 pub fn for_client(&self, client_id: &str) -> &BloopCollection<Player> {
224 self.per_client
225 .get(client_id)
226 .unwrap_or(&self.empty_collection)
227 }
228}
229
/// A bloop stripped of its player handle: plain metadata only, suitable for
/// persistence and for sending over event channels.
#[derive(Debug, Clone)]
pub struct ProcessedBloop {
    /// Id of the player this bloop belongs to.
    pub player_id: Uuid,

    /// Identifier of the client that recorded the bloop.
    pub client_id: String,

    /// Timestamp at which the bloop was recorded.
    pub recorded_at: DateTime<Utc>,
}
242
243impl<Player> From<&Bloop<Player>> for ProcessedBloop {
244 fn from(bloop: &Bloop<Player>) -> Self {
245 Self {
246 player_id: bloop.player_id,
247 client_id: bloop.client_id.clone(),
248 recorded_at: bloop.recorded_at,
249 }
250 }
251}
252
/// Storage backend for processed bloops.
#[async_trait]
pub trait BloopRepository {
    /// Error type produced by the backend; must be printable and sendable
    /// across threads so it can be logged from async tasks.
    type Error: std::fmt::Debug + std::fmt::Display + Send + Sync + 'static;

    /// Persists all of `bloops` as a single batch.
    async fn persist_batch(&self, bloops: &[ProcessedBloop]) -> Result<(), Self::Error>;
}
260
/// Buffers `ProcessedBloop` events from a broadcast channel and persists them
/// in batches, flushing on size or on a timer (see `process_events`).
#[derive(Debug)]
pub struct ProcessedBloopSink<R: BloopRepository> {
    // Backend that receives the batches.
    repository: R,
    // Bloops accumulated since the last successful flush.
    buffer: Vec<ProcessedBloop>,
    // Flush as soon as the buffer reaches this many entries.
    max_batch_size: usize,
    // Flush at least this often, regardless of buffer size.
    max_batch_duration: Duration,
    // Source of events; only `Event::BloopProcessed` is buffered.
    event_rx: broadcast::Receiver<Event>,
}
270
271impl<R: BloopRepository> ProcessedBloopSink<R> {
272 pub async fn process_events(&mut self) {
274 let mut flush_interval = time::interval(self.max_batch_duration);
275
276 loop {
277 let should_continue = select! {
278 _ = flush_interval.tick() => {
279 self.flush().await;
280 true
281 }
282
283 result = self.event_rx.recv() => {
284 self.handle_recv(result).await
285 }
286 };
287
288 if !should_continue {
289 break;
290 }
291 }
292 }
293
294 pub async fn flush(&mut self) {
296 if self.buffer.is_empty() {
297 return;
298 }
299
300 let batch = std::mem::take(&mut self.buffer);
301
302 if let Err(err) = self.repository.persist_batch(&batch).await {
303 error!("Failed to persist bloop batch: {}", err);
304 self.buffer.extend(batch);
305 } else {
306 debug!("Persisted {} bloops", batch.len());
307 }
308 }
309
310 async fn handle_recv(&mut self, result: Result<Event, RecvError>) -> bool {
311 match result {
312 Ok(Event::BloopProcessed(bloop)) => {
313 self.buffer.push(bloop);
314
315 if self.buffer.len() >= self.max_batch_size {
316 debug!("Batch size reached, flushing");
317 self.flush().await;
318 }
319 true
320 }
321 Ok(_) => true,
322 Err(RecvError::Lagged(n)) => {
323 warn!("ProcessedBloopSink lagged by {n} messages, some bloops were missed");
324 self.flush().await;
325 true
326 }
327 Err(RecvError::Closed) => {
328 debug!("ProcessedBloopSink event stream closed, exiting event loop");
329 false
330 }
331 }
332 }
333}
334
/// Uninhabited error type: the sink's subsystem `run` cannot actually fail,
/// but `IntoSubsystem` requires an error type.
#[cfg(feature = "tokio-graceful-shutdown")]
#[derive(Debug, Error)]
pub enum NeverError {}
338
#[cfg(feature = "tokio-graceful-shutdown")]
#[async_trait]
impl<R> IntoSubsystem<NeverError> for ProcessedBloopSink<R>
where
    R: BloopRepository + Send + Sync + 'static,
{
    /// Runs the sink's event loop until shutdown is requested, then performs
    /// one final flush so buffered bloops are not lost on exit.
    async fn run(mut self, subsys: SubsystemHandle) -> Result<(), NeverError> {
        // Either the loop ends (channel closed) or shutdown cancels it; the
        // distinction does not matter here, so the result is ignored.
        let _ = self.process_events().cancel_on_shutdown(&subsys).await;
        self.flush().await;

        Ok(())
    }
}
352
/// Error returned by `ProcessedBloopSinkBuilder::build` when a required
/// field was never set.
#[derive(Debug, Error)]
pub enum BuilderError {
    #[error("missing field: {0}")]
    MissingField(&'static str),
}
358
359#[derive(Debug, Default)]
361pub struct ProcessedBloopSinkBuilder<R: BloopRepository> {
362 repository: Option<R>,
363 max_batch_size: Option<usize>,
364 max_batch_duration: Option<Duration>,
365 event_rx: Option<broadcast::Receiver<Event>>,
366}
367
368impl<R: BloopRepository> ProcessedBloopSinkBuilder<R> {
369 pub fn new() -> Self {
371 Self {
372 repository: None,
373 max_batch_size: None,
374 max_batch_duration: None,
375 event_rx: None,
376 }
377 }
378
379 pub fn repository(mut self, repository: R) -> Self {
381 self.repository = Some(repository);
382 self
383 }
384
385 pub fn max_batch_size(mut self, size: usize) -> Self {
387 self.max_batch_size = Some(size);
388 self
389 }
390
391 pub fn max_batch_duration(mut self, duration: Duration) -> Self {
393 self.max_batch_duration = Some(duration);
394 self
395 }
396
397 pub fn event_rx(mut self, event_rx: broadcast::Receiver<Event>) -> Self {
399 self.event_rx = Some(event_rx);
400 self
401 }
402
403 pub fn build(self) -> Result<ProcessedBloopSink<R>, BuilderError> {
406 Ok(ProcessedBloopSink {
407 repository: self
408 .repository
409 .ok_or(BuilderError::MissingField("repository"))?,
410 buffer: Vec::new(),
411 max_batch_size: self
412 .max_batch_size
413 .ok_or(BuilderError::MissingField("max_batch_size"))?,
414 max_batch_duration: self
415 .max_batch_duration
416 .ok_or(BuilderError::MissingField("max_batch_duration"))?,
417 event_rx: self
418 .event_rx
419 .ok_or(BuilderError::MissingField("event_rx"))?,
420 })
421 }
422}
423
#[cfg(test)]
mod tests {
    use super::*;
    use crate::bloop::{Bloop, BloopCollection, BloopProvider};
    use crate::test_utils::MockPlayer;
    use ntest::timeout;
    use std::sync::{Arc, Mutex};
    use std::time::Duration;
    use tokio::sync::mpsc;

    /// Builds a bloop for a fresh mock player, recorded by "client-1".
    fn create_mock_bloop(recorded_at: DateTime<Utc>) -> Arc<Bloop<MockPlayer>> {
        let (player, _) = MockPlayer::builder().build();
        Arc::new(Bloop::new(player, "client-1", recorded_at))
    }

    // `add` pushes to the front, so iteration yields most-recently-added first.
    #[test]
    fn bloop_collection_adds_bloops_and_preserves_order() {
        let now = Utc::now();
        let bloop1 = create_mock_bloop(now);
        let bloop2 = create_mock_bloop(now + Duration::from_secs(1));

        let mut collection = BloopCollection::new(Duration::from_secs(60));
        collection.add(bloop2.clone());
        collection.add(bloop1.clone());

        let bloops: Vec<_> = collection.iter().collect();
        assert_eq!(bloops.len(), 2);
        assert_eq!(bloops[0].recorded_at, bloop1.recorded_at);
        assert_eq!(bloops[1].recorded_at, bloop2.recorded_at);
    }

    // Entries older than `max_age` are pruned when a new bloop is added.
    #[test]
    fn bloop_collection_prunes_bloops_older_than_max_age() {
        let mut collection = BloopCollection::new(Duration::from_secs(5));
        let now = Utc::now();

        let old_bloop = create_mock_bloop(now - Duration::from_secs(10));
        let recent_bloop = create_mock_bloop(now - Duration::from_secs(2));

        collection.add(old_bloop);
        collection.add(recent_bloop.clone());

        let bloops: Vec<_> = collection.iter().collect();
        assert_eq!(bloops.len(), 1);
        assert_eq!(bloops[0].recorded_at, recent_bloop.recorded_at);
    }

    #[test]
    fn bloop_provider_adds_bloops_to_global_and_per_client_collections() {
        let mut provider = BloopProvider::new(Duration::from_secs(60));
        let now = Utc::now();
        let bloop = create_mock_bloop(now);
        let client_id = bloop.client_id.clone();

        provider.add(bloop.clone());

        let global_bloops: Vec<_> = provider.global().iter().collect();
        assert_eq!(global_bloops.len(), 1);
        assert_eq!(global_bloops[0].recorded_at, bloop.recorded_at);

        let client_bloops: Vec<_> = provider.for_client(&client_id).iter().collect();
        assert_eq!(client_bloops.len(), 1);
        assert_eq!(client_bloops[0].recorded_at, bloop.recorded_at);
    }

    #[test]
    fn bloop_provider_correctly_initializes_from_existing_bloops() {
        let now = Utc::now();
        let (player1, _) = MockPlayer::builder().build();
        let (player2, _) = MockPlayer::builder().build();

        let bloop1 = Bloop::new(player1.clone(), "client-1", now);
        let bloop2 = Bloop::new(player1, "client-1", now + Duration::from_secs(1));
        let bloop3 = Bloop::new(player2, "client-2", now);

        let provider =
            BloopProvider::with_bloops(Duration::from_secs(60), vec![bloop1, bloop2, bloop3]);

        let global_bloops: Vec<_> = provider.global().iter().collect();
        assert_eq!(global_bloops.len(), 3);

        let client1_bloops: Vec<_> = provider.for_client("client-1").iter().collect();
        assert_eq!(client1_bloops.len(), 2);
        assert!(client1_bloops.iter().any(|b| b.recorded_at == now));
        assert!(
            client1_bloops
                .iter()
                .any(|b| b.recorded_at == now + Duration::from_secs(1))
        );

        let client2_bloops: Vec<_> = provider.for_client("client-2").iter().collect();
        assert_eq!(client2_bloops.len(), 1);
        assert_eq!(client2_bloops[0].recorded_at, now);
    }

    #[test]
    fn bloops_since_filter_correctly_filters_based_on_timestamp() {
        let now = Utc::now();
        let bloop1 = create_mock_bloop(now);
        let bloop2 = create_mock_bloop(now + Duration::from_secs(10));

        let bloops = vec![bloop1.clone(), bloop2.clone()];
        let since = now + Duration::from_secs(5);
        let filtered: Vec<_> = bloops.iter().filter(bloops_since(since)).collect();

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].recorded_at, bloop2.recorded_at);
    }

    #[test]
    fn bloops_for_player_filter_correctly_filters_based_on_player_id() {
        let now = Utc::now();
        let bloop1 = create_mock_bloop(now);
        let (player2, player2_id) = MockPlayer::builder().build();
        let bloop2 = Arc::new(Bloop::new(player2, "client-1", now));

        let bloops = vec![bloop1.clone(), bloop2.clone()];
        let filtered: Vec<_> = bloops
            .iter()
            .filter(bloops_for_player(player2_id))
            .collect();

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].player_id, player2_id);
    }

    /// In-memory repository double: forwards each persisted batch over a
    /// channel and can be toggled to fail on demand.
    #[derive(Clone)]
    struct DummyRepo {
        sender: mpsc::UnboundedSender<Vec<ProcessedBloop>>,
        fail_persist: Arc<Mutex<bool>>,
    }

    impl DummyRepo {
        fn new(sender: mpsc::UnboundedSender<Vec<ProcessedBloop>>) -> Self {
            Self {
                sender,
                fail_persist: Arc::new(Mutex::new(false)),
            }
        }

        // Controls whether the next `persist_batch` call fails.
        fn set_fail(&self, fail: bool) {
            *self.fail_persist.lock().unwrap() = fail;
        }
    }

    #[async_trait::async_trait]
    impl BloopRepository for DummyRepo {
        type Error = &'static str;

        async fn persist_batch(&self, bloops: &[ProcessedBloop]) -> Result<(), Self::Error> {
            if *self.fail_persist.lock().unwrap() {
                return Err("fail");
            }
            self.sender.send(bloops.to_vec()).unwrap();
            Ok(())
        }
    }

    /// Builds a ProcessedBloop with a random player id, recorded "now".
    fn create_processed_bloop() -> ProcessedBloop {
        ProcessedBloop {
            player_id: Uuid::new_v4(),
            client_id: "client-1".to_string(),
            recorded_at: Utc::now(),
        }
    }

    #[tokio::test]
    async fn flush_persists_and_clears_buffer() {
        let (tx, mut rx) = mpsc::unbounded_channel();
        let repo = DummyRepo::new(tx);

        let (_evt_tx, evt_rx) = broadcast::channel(16);

        let mut sink = ProcessedBloopSinkBuilder::new()
            .repository(repo.clone())
            .max_batch_size(10)
            .max_batch_duration(Duration::from_secs(5))
            .event_rx(evt_rx)
            .build()
            .unwrap();

        sink.buffer.push(create_processed_bloop());
        sink.flush().await;

        let batch = rx.recv().await.unwrap();
        assert_eq!(batch.len(), 1);
        assert!(sink.buffer.is_empty());
    }

    // A failed flush keeps the batch buffered; the next flush retries it.
    #[tokio::test]
    async fn flush_retries_on_failure() {
        let (tx, mut rx) = mpsc::unbounded_channel();
        let repo = DummyRepo::new(tx);
        repo.set_fail(true);

        let (_evt_tx, evt_rx) = broadcast::channel(16);

        let mut sink = ProcessedBloopSinkBuilder::new()
            .repository(repo.clone())
            .max_batch_size(10)
            .max_batch_duration(Duration::from_secs(5))
            .event_rx(evt_rx)
            .build()
            .unwrap();

        sink.buffer.push(create_processed_bloop());
        sink.flush().await;
        assert_eq!(sink.buffer.len(), 1);

        repo.set_fail(false);
        sink.flush().await;

        let batch = rx.recv().await.unwrap();
        assert_eq!(batch.len(), 1);
        assert!(sink.buffer.is_empty());
    }

    // Long flush interval ensures the flush here is triggered by batch size.
    #[tokio::test]
    #[timeout(1000)]
    async fn process_events_flushes_on_batch_size() {
        let (tx, mut rx) = mpsc::unbounded_channel();
        let repo = DummyRepo::new(tx);
        let (evt_tx, evt_rx) = broadcast::channel(16);

        let mut sink = ProcessedBloopSinkBuilder::new()
            .repository(repo.clone())
            .max_batch_size(2)
            .max_batch_duration(Duration::from_secs(10))
            .event_rx(evt_rx)
            .build()
            .unwrap();

        let bloop1 = create_processed_bloop();
        let bloop2 = create_processed_bloop();

        let handle = tokio::spawn(async move { sink.process_events().await });

        evt_tx.send(Event::BloopProcessed(bloop1.clone())).unwrap();
        evt_tx.send(Event::BloopProcessed(bloop2.clone())).unwrap();

        let batch = rx.recv().await.unwrap();
        assert_eq!(batch.len(), 2);

        // Dropping the sender closes the channel, ending the event loop.
        drop(evt_tx);

        handle.await.unwrap();
    }

    #[tokio::test]
    async fn handle_recv_returns_false_on_closed() {
        let (tx, _rx) = mpsc::unbounded_channel();
        let repo = DummyRepo::new(tx);
        let (evt_tx, evt_rx) = broadcast::channel(16);

        let mut sink = ProcessedBloopSinkBuilder::new()
            .repository(repo.clone())
            .max_batch_size(10)
            .max_batch_duration(Duration::from_secs(10))
            .event_rx(evt_rx)
            .build()
            .unwrap();

        drop(evt_tx);

        let recv_result = sink.event_rx.recv().await;
        let result = sink.handle_recv(recv_result).await;
        assert!(!result);
    }
}