use super::{
    node::Node,
    KeyspaceConfig,
    Scylla,
    ScyllaEvent,
    ScyllaHandle,
};
use crate::{
    app::ring::{
        Registry,
        ReplicationInfo,
        SharedRing,
    },
    cql::CqlBuilder,
};
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::RwLock;

use async_trait::async_trait;
use overclock::{
    core::{
        Actor,
        ActorError,
        ActorRequest,
        ActorResult,
        Rt,
        ScopeId,
        Service,
        ServiceEvent,
        ServiceStatus,
        Shutdown,
        ShutdownEvent,
        StreamExt,
        UnboundedChannel,
        UnboundedHandle,
    },
    prefab::websocket::{
        GenericResponder,
        JsonMessage,
        Responder,
    },
};

use std::{
    collections::HashMap,
    convert::TryFrom,
    net::SocketAddr,
};

pub(crate) type Nodes = HashMap<SocketAddr, NodeInfo>;

/// Cluster state
pub struct Cluster {
    nodes: Nodes,
    keyspaces: HashMap<String, ReplicationInfo>,
}

/// Cluster Event type
pub enum ClusterEvent {
    /// Topology configuration
    Topology(Topology, Option<TopologyResponder>),
    /// Used by the Node to keep the cluster up to date with its service
    Microservice(ScopeId, Service, Option<ActorResult<()>>),
    /// Shutdown signal
    Shutdown,
}

impl ShutdownEvent for ClusterEvent {
    fn shutdown_event() -> Self {
        Self::Shutdown
    }
}

impl ServiceEvent<Node> for ClusterEvent {
    fn eol_event(scope_id: ScopeId, service: Service, _: Node, r: ActorResult<()>) -> Self {
        Self::Microservice(scope_id, service, Some(r))
    }
    fn report_event(scope_id: ScopeId, service: Service) -> Self {
        Self::Microservice(scope_id, service, None)
    }
}

/// Cluster topology event
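///
/// Over the websocket route the variants are expected as serde's default externally tagged
/// JSON (no serde attributes are applied here). The payloads below are an illustrative
/// sketch rather than a guaranteed wire format:
///
/// ```ignore
/// // {"AddNode":"172.17.0.2:19042"}
/// // {"RemoveNode":"172.17.0.2:19042"}
/// // {"RemoveKeyspace":"my_keyspace"}
/// // "BuildRing"
/// let event: Topology = serde_json::from_str(r#"{"AddNode":"172.17.0.2:19042"}"#).unwrap();
/// ```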
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub enum Topology {
    /// Used by Scylla/dashboard to add/connect to a new scylla node in the cluster
    AddNode(SocketAddr),
    /// Used by Scylla/dashboard to remove/disconnect from an existing scylla node in the cluster
    RemoveNode(SocketAddr),
    /// Upsert keyspace
    UpsertKeyspace(KeyspaceConfig),
    /// Remove keyspace by its name
    RemoveKeyspace(String),
    /// Used by Scylla/dashboard to build a new ring and expose the most recent cluster topology
    BuildRing,
}

/// Topology responder
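///
/// The `OneShot` variant is what `ClusterHandleExt` (at the bottom of this module) uses under
/// the hood. A minimal caller-side sketch, assuming a `cluster_handle: UnboundedHandle<ClusterEvent>`
/// obtained from the running actor system:
///
/// ```ignore
/// let (tx, rx) = tokio::sync::oneshot::channel();
/// cluster_handle.send(ClusterEvent::Topology(
///     Topology::BuildRing,
///     Some(TopologyResponder::OneShot(tx)),
/// ))?;
/// // the cluster replies once the topology event has been processed
/// let response: TopologyResponse = rx.await?;
/// ```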
pub enum TopologyResponder {
    /// Websocket responder
    WsResponder(Responder),
    /// OneShot Responder
    OneShot(tokio::sync::oneshot::Sender<TopologyResponse>),
}
impl TopologyResponder {
    async fn reply(self, response: TopologyResponse) -> anyhow::Result<()> {
        match self {
            Self::WsResponder(r) => r.inner_reply(response).await,
            Self::OneShot(tx) => tx.send(response).map_err(|_| anyhow::Error::msg("caller out of scope")),
        }
    }
}
/// The topology response, sent after the cluster processes a topology event
pub type TopologyResponse = Result<Topology, TopologyErr>;

#[derive(serde::Deserialize, serde::Serialize, Debug, Error)]
#[error("message: {message:?}")]
/// Topology error
pub struct TopologyErr {
    message: String,
}

impl TopologyErr {
    fn new(message: String) -> Self {
        Self { message }
    }
}

impl Cluster {
    /// Create new cluster with empty state
    pub fn new() -> Self {
        let nodes = HashMap::new();
        let keyspaces = HashMap::new();
        Self { nodes, keyspaces }
    }
}

/// `NodeInfo` contains the fields needed to identify a ScyllaDB node.
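///
/// A minimal sketch (not part of this crate's API) of how shard-aware ScyllaDB drivers
/// typically combine `shard_count` and `msb` to map a murmur3 token to a shard; the mapping
/// actually used at runtime lives elsewhere in this crate:
///
/// ```ignore
/// fn shard_of(token: i64, shard_count: u16, msb: u8) -> u16 {
///     // bias the signed token into the unsigned range, drop the ignored most significant
///     // bits, then scale the remaining bits onto [0, shard_count)
///     let biased = (token as u64).wrapping_add(1u64 << 63);
///     let shifted = biased.wrapping_shl(msb as u32);
///     (((shifted as u128) * (shard_count as u128)) >> 64) as u16
/// }
/// ```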
#[derive(Clone)]
pub struct NodeInfo {
    /// The scope id of the node
    pub(crate) scope_id: ScopeId,
    /// The address of the node
    pub(crate) address: SocketAddr,
    /// The data center in which the scylla node exists
    pub(crate) data_center: String,
    /// The tokens of all the node's shards
    pub(crate) tokens: Vec<i64>,
    /// The shard count of the scylla node
    pub(crate) shard_count: u16,
    /// The number of most significant bits used by the node's shard-aware token mapping
    pub(crate) msb: u8,
}

/// The Cluster actor lifecycle implementation
#[async_trait]
impl Actor<ScyllaHandle> for Cluster {
    type Data = (Scylla, Arc<RwLock<Registry>>);
    type Channel = UnboundedChannel<ClusterEvent>;
    async fn init(&mut self, rt: &mut Rt<Self, ScyllaHandle>) -> ActorResult<Self::Data> {
        log::info!("Cluster is {}", rt.service().status());
        // add empty registry as resource
        let reporters_registry = Arc::new(RwLock::new(Registry::new()));
        rt.publish(reporters_registry.clone()).await;
        let parent_id = rt
            .parent_id()
            .ok_or_else(|| ActorError::exit_msg("cluster without scylla supervisor"))?;
        let scylla = rt
            .lookup::<Scylla>(parent_id)
            .await
            .ok_or_else(|| ActorError::exit_msg("cluster unables to lookup for scylla as config"))?;
        // add route to enable configuring the cluster topology over the ws
        rt.add_route::<(JsonMessage, Responder)>().await.ok();
        for address in scylla.nodes.iter() {
            log::info!("Starting node: {}", address);
            if let Err(e) = self.start_node(rt, address.clone(), &scylla).await {
                log::error!("Unable to start node: {}, error: {}", address, e);
                return Err(e);
            }
            log::info!("Successfully started node: {}", address);
        }
        let keyspaces = scylla.keyspaces.iter();
        for super::KeyspaceConfig { name, data_centers } in keyspaces {
            let mut info = ReplicationInfo::empty();
            for (dc_name, dc_config) in data_centers {
                info.upsert(dc_name, dc_config.replication_factor as usize);
            }
            self.keyspaces.insert(name.clone(), info);
        }
        if self.nodes.is_empty() {
            rt.update_status(ServiceStatus::Idle).await;
        } else {
            SharedRing::new(
                &scylla.local_dc,
                reporters_registry.read().await.clone(),
                self.keyspaces.clone(),
                scylla.reporter_count,
                &self.nodes,
            )
            .commit();
        }
        Ok((scylla, reporters_registry))
    }
    async fn run(&mut self, rt: &mut Rt<Self, ScyllaHandle>, (mut scylla, registry): Self::Data) -> ActorResult<()> {
        log::info!("Cluster is {}", rt.service().status());
        while let Some(event) = rt.inbox_mut().next().await {
            match event {
                ClusterEvent::Topology(topology, mut responder_opt) => {
                    // configure topology only if the cluster is not stopping
                    if rt.service().is_stopping() {
                        if let Some(responder) = responder_opt.take() {
                            let error_response: Result<Topology, _> = Err(TopologyErr::new(format!(
                                "Cannot configure topology while the cluster is stopping"
                            )));
                            responder.reply(error_response).await.ok();
                        }
                        continue;
                    }
                    match topology {
                        Topology::UpsertKeyspace(keyspace_config) => {
                            let name = keyspace_config.name.clone();
                            let data_centers = keyspace_config.data_centers.iter();
                            let mut info = ReplicationInfo::empty();
                            for (dc_name, dc_config) in data_centers {
                                info.upsert(dc_name, dc_config.replication_factor as usize);
                            }
                            self.keyspaces.insert(name, info);
                            scylla.insert_keyspace(keyspace_config);
                        }
                        Topology::RemoveKeyspace(name) => {
                            self.keyspaces.remove(&name);
                            scylla.remove_keyspace(&name);
                        }
                        Topology::AddNode(address) => {
                            if self.nodes.contains_key(&address) {
                                if let Some(responder) = responder_opt.take() {
                                    log::error!("Cannot add existing {} node into the cluster", address);
                                    let error_response: Result<Topology, _> = Err(TopologyErr::new(format!(
                                        "Cannot add existing {} node into the cluster",
                                        address
                                    )));
                                    responder.reply(error_response).await.ok();
                                    continue;
                                }
                            } else if responder_opt.is_none() {
                                // skip re-adding a node, because it got removed
                                log::warn!("skipping re-adding a {} node, as it got removed", address);
                                continue;
                            }
                            log::info!("Adding {} node!", address);
                            // before spawning the node, first make sure it's online
                            let cql = CqlBuilder::new()
                                .address(address)
                                .tokens()
                                .recv_buffer_size(scylla.recv_buffer_size)
                                .send_buffer_size(scylla.send_buffer_size)
                                .authenticator(scylla.authenticator.clone())
                                .build();
                            match cql.await {
                                Ok(mut cqlconn) => {
                                    log::info!("Successfully connected to node {}!", address);
                                    let shard_count = cqlconn.shard_count();
                                    if let (Some(dc), Some(tokens)) = (cqlconn.take_dc(), cqlconn.take_tokens()) {
                                        // create node
                                        let node = Node::new(address.clone(), shard_count as usize);
                                        // start the node and ensure it got initialized
                                        match rt.start(address.to_string(), node).await {
                                            Ok(h) => {
                                                // create nodeinfo
                                                let node_info = NodeInfo {
                                                    scope_id: h.scope_id(),
                                                    address: address.clone(),
                                                    msb: cqlconn.msb(),
                                                    shard_count,
                                                    data_center: dc,
                                                    tokens,
                                                };
                                                // add node_info to nodes
                                                self.nodes.insert(address.clone(), node_info);
                                                scylla.nodes.insert(address);
                                                log::info!("Added {} node!", address);
                                                if let Some(responder) = responder_opt.take() {
                                                    rt.update_status(ServiceStatus::Maintenance).await;
                                                    log::info!("Cluster is Maintenance");
                                                    let ok_response: Result<_, TopologyErr> =
                                                        Ok(Topology::AddNode(address));
                                                    responder.reply(ok_response).await.ok();
                                                } else if !rt.service().is_maintenance() {
                                                    let maybe_unstable_registry = registry.read().await.clone();
                                                    log::warn!("Rebuilding healthy ring");
                                                    self.build_healthy_ring(maybe_unstable_registry, &scylla);
                                                    self.update_service_status(rt).await;
                                                } // else the admin is expected to rebuild the ring
                                            }
                                            Err(err) => {
                                                if let Some(responder) = responder_opt.take() {
                                                    log::error!("unable to add {} node, error: {}", address, err);
                                                    let error_response: Result<Topology, _> = Err(TopologyErr::new(
                                                        format!("unable to add {} node, error: {}", address, err),
                                                    ));
                                                    responder.reply(error_response).await.ok();
                                                } else {
                                                    let my_handle = rt.handle().clone();
                                                    Self::restart_node(my_handle, address);
                                                }
                                            }
                                        }
                                    } else {
                                        log::error!("Failed to retrieve data from CQL Connection!");
                                        return Err(ActorError::exit_msg(
                                            "Failed to retrieve data from CQL Connection!",
                                        ));
                                    }
                                }
                                Err(error) => {
                                    log::warn!("Unable to connect to node {}!", address);
                                    if let Some(responder) = responder_opt.take() {
                                        let error_response: Result<Topology, _> = Err(TopologyErr::new(format!(
                                            "Unable to add {} node, error: {}",
                                            address, error
                                        )));
                                        responder.reply(error_response).await.ok();
                                    } else {
                                        let my_handle = rt.handle().clone();
                                        Self::restart_node(my_handle, address);
                                    }
                                }
                            }
                        }
                        Topology::RemoveNode(address) => {
                            let responder = responder_opt.take().ok_or_else(|| {
                                ActorError::exit_msg("cannot use remove node topology variant without responder")
                            })?;
                            log::info!("Removing {} node!", address);
                            // get and remove node_info
                            if let Some(node_info) = self.nodes.get(&address) {
                                if let Some(join_handle) = rt.shutdown_child(&node_info.scope_id).await {
                                    // update status to maintenance, as this is a topology event
                                    rt.update_status(ServiceStatus::Maintenance).await;
                                    log::info!("Cluster is Maintenance");
                                    // Await until it has shut down; this forces a synchronous shutdown
                                    join_handle.await.ok();
                                    self.nodes.remove(&address);
                                    scylla.nodes.remove(&address);
                                    log::info!("Removed {} node!", address);
                                    let ok_response: Result<_, TopologyErr> = Ok(Topology::RemoveNode(address));
                                    responder.reply(ok_response).await.ok();
                                };
                            } else {
                                log::error!("unable to remove non-existing {} node!", address);
                                // Cannot remove non-existing node.
                                let error_response: Result<Topology, _> = Err(TopologyErr::new(format!(
                                    "unable to remove non-existing {} node",
                                    address
                                )));
                                responder.reply(error_response).await.ok();
                            };
                        }
                        Topology::BuildRing => {
                            let responder = responder_opt
                                .take()
                                .ok_or_else(|| ActorError::exit_msg("cannot build ring without responder"))?;
                            // re/build
                            let status_change;
                            if self.nodes.is_empty() {
                                SharedRing::drop();
                                status_change = ServiceStatus::Idle;
                            } else {
                                let registry_snapshot = registry.read().await.clone();
                                // compute the total shard count across all nodes
                                let total_shard_count: usize = self
                                    .nodes
                                    .values()
                                    .map(|node_info| node_info.shard_count as usize)
                                    .sum();
                                // ensure all nodes are running
                                if rt.microservices_any(|node| !node.is_running())
                                    || self.nodes.len() != rt.service().microservices().len()
                                    || registry_snapshot.len() != total_shard_count
                                {
                                    log::error!(
                                        "Unstable cluster, cannot build ring! Fix this by removing any dead node(s)"
                                    );
                                    // the cluster is in a critical state; we cannot rebuild the ring.
                                    // note: the responder was already taken above, so reply on it directly
                                    let error_response: Result<Topology, _> =
                                        Err(TopologyErr::new("Unstable cluster, unable to build ring".to_string()));
                                    responder.reply(error_response).await.ok();
                                    continue;
                                }
                                SharedRing::new(
                                    &scylla.local_dc,
                                    registry_snapshot,
                                    self.keyspaces.clone(),
                                    scylla.reporter_count,
                                    &self.nodes,
                                )
                                .commit();
                                status_change = ServiceStatus::Running;
                            }
                            rt.supervisor_handle()
                                .send(ScyllaEvent::UpdateState(scylla.clone()))
                                .ok();
                            if rt.service().status() != &status_change {
                                log::info!("Cluster is {}", status_change);
                            }
                            rt.update_status(status_change).await;
                            let ok_response: Result<_, TopologyErr> = Ok(Topology::BuildRing);
                            responder.reply(ok_response).await.ok();
                        }
                    }
                }
                ClusterEvent::Microservice(scope_id, service, result_opt) => {
                    if service.is_stopped() {
                        let address: SocketAddr = service
                            .directory()
                            .as_ref()
                            .ok_or_else(|| ActorError::exit_msg("directory microservice for stopped node"))?
                            .parse()
                            .map_err(ActorError::exit)?;
                        if self.nodes.contains_key(&address) {
                            rt.upsert_microservice(scope_id, service);
                        } else {
                            rt.remove_microservice(scope_id);
                        }
                        if !rt.service().is_stopping() && self.nodes.contains_key(&address) {
                            {
                                let maybe_unstable_registry = registry.read().await.clone();
                                self.build_healthy_ring(maybe_unstable_registry, &scylla);
                            }
                            if let Err(ActorError {
                                source: _,
                                request: Some(ActorRequest::Restart(_)),
                            }) = result_opt.expect("No result received from microservice!")
                            {
                                let my_handle = rt.handle().clone();
                                Self::restart_node(my_handle, address);
                            }
                        }
                    } else {
                        rt.upsert_microservice(scope_id, service);
                    }
                    if rt.service().is_maintenance() || rt.service().is_stopping() {
                        rt.update_status(rt.service().status().clone()).await;
                        if rt.service().is_stopping() && rt.microservices_stopped() {
                            rt.inbox_mut().close();
                        }
                    } else {
                        self.update_service_status(rt).await;
                    }
                }
                ClusterEvent::Shutdown => {
                    log::warn!("Cluster is Stopping");
                    // stop all the children/nodes
                    rt.stop().await;
                    SharedRing::drop();
                    if rt.microservices_stopped() {
                        rt.inbox_mut().close();
                    }
                }
            }
        }
        log::info!("Cluster gracefully shutdown");
        Ok(())
    }
}

impl TryFrom<(JsonMessage, Responder)> for ClusterEvent {
    type Error = anyhow::Error;
    fn try_from((msg, responder): (JsonMessage, Responder)) -> Result<Self, Self::Error> {
        Ok(ClusterEvent::Topology(
            serde_json::from_str(msg.0.as_ref())?,
            Some(TopologyResponder::WsResponder(responder)),
        ))
    }
}

impl Cluster {
    async fn start_node(
        &mut self,
        rt: &mut Rt<Self, ScyllaHandle>,
        address: SocketAddr,
        scylla: &Scylla,
    ) -> ActorResult<()> {
        // before spawning the node, first make sure it's online
        let mut cqlconn = CqlBuilder::new()
            .address(address)
            .tokens()
            .recv_buffer_size(scylla.recv_buffer_size)
            .send_buffer_size(scylla.send_buffer_size)
            .authenticator(scylla.authenticator.clone())
            .build()
            .await
            .map_err(|e| ActorError::aborted(e))?;
        log::info!("Successfully connected to node {}!", address);
        let shard_count = cqlconn.shard_count();
        if let (Some(dc), Some(tokens)) = (cqlconn.take_dc(), cqlconn.take_tokens()) {
            // create node
            let node = Node::new(address.clone(), shard_count as usize);
            let h = rt.start(address.to_string(), node).await?;
            // create nodeinfo
            let node_info = NodeInfo {
                scope_id: h.scope_id(),
                address: address.clone(),
                msb: cqlconn.msb(),
                shard_count,
                data_center: dc,
                tokens,
            };
            // add node_info to nodes
            self.nodes.insert(address, node_info);
            log::info!("Added {} node!", address);
        } else {
            log::error!("Failed to retrieve data from CQL Connection!");
            return Err(ActorError::exit_msg("Failed to retrieve data from CQL Connection!"));
        }
        Ok(())
    }
    fn restart_node(my_handle: UnboundedHandle<ClusterEvent>, address: SocketAddr) {
        let restart_node_task = async move {
            log::warn!("After 5 seconds will try to restart/reconnect {}", address);
            tokio::time::sleep(std::time::Duration::from_secs(5)).await;
            my_handle
                .send(ClusterEvent::Topology(Topology::AddNode(address), None))
                .ok();
        };
        overclock::spawn_task(&format!("cluster restarting {} node", address), restart_node_task);
    }
    async fn update_service_status(&self, rt: &mut Rt<Self, ScyllaHandle>) {
        if self.nodes.values().all(|node_info| {
            rt.service()
                .microservices()
                .get(&node_info.scope_id)
                .map_or(false, |ms_node| ms_node.is_running())
        }) {
            if !rt.service().is_running() {
                log::info!("Cluster is Running");
            }
            rt.update_status(ServiceStatus::Running).await;
        } else if rt.microservices_stopped() {
            if self.nodes.is_empty() {
                log::warn!("Cluster is Idle");
                rt.update_status(ServiceStatus::Idle).await;
            } else {
                log::warn!("Cluster is experiencing an Outage");
                rt.update_status(ServiceStatus::Outage).await;
            }
        } else {
            log::warn!("Cluster is Degraded");
            rt.update_status(ServiceStatus::Degraded).await;
        }
    }
    fn build_healthy_ring(&mut self, mut registry: Registry, scylla: &Scylla) {
        // check that every node has a registry entry for each of its stages;
        // stage keys reuse the node's socket address with the shard id substituted as the port
        let mut healthy_nodes: HashMap<SocketAddr, NodeInfo> = HashMap::new();
        self.nodes.iter().for_each(|(addr, info)| {
            let mut stage_addr_key = addr.clone();
            let mut healthy = true;
            for shard_id in 0..info.shard_count {
                stage_addr_key.set_port(shard_id);
                healthy &= registry.contains_key(&stage_addr_key);
            }
            if healthy {
                healthy_nodes.insert(addr.clone(), info.clone());
            } else {
                // delete all the node's entries from registry
                for shard_id in 0..info.shard_count {
                    stage_addr_key.set_port(shard_id);
                    registry.remove(&stage_addr_key);
                }
                log::warn!("Removing unhealthy {} node from the Ring", addr);
            }
        });

        if healthy_nodes.is_empty() {
            SharedRing::drop();
            log::warn!("Enforcing healthy empty Ring");
        } else {
            SharedRing::new(
                &scylla.local_dc,
                registry,
                self.keyspaces.clone(),
                scylla.reporter_count,
                &self.nodes,
            )
            .commit();
            if self.nodes.len() != healthy_nodes.len() {
                log::warn!("Enforcing healthy Ring with only {} healthy nodes", healthy_nodes.len());
            } else {
                log::info!("Building stable Ring with {} nodes", self.nodes.len());
            }
        }
    }
}

#[async_trait]
/// The public interface of the cluster handle; it enables adding/removing nodes, managing
/// keyspaces, and building the ring.
/// Note: you must invoke `build_ring` to expose the changes.
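///
/// A minimal usage sketch, assuming a `cluster_handle: UnboundedHandle<ClusterEvent>` obtained
/// from the running Scylla application (the address below is illustrative):
///
/// ```ignore
/// let address: SocketAddr = "172.17.0.2:19042".parse()?;
/// cluster_handle.add_node(address).await?;
/// // topology changes are only exposed once the ring is rebuilt
/// cluster_handle.build_ring().await?;
/// ```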
pub trait ClusterHandleExt {
    /// Add a scylla node to the cluster
    async fn add_node(&self, node: SocketAddr) -> TopologyResponse;
    /// Remove a scylla node from the cluster
    async fn remove_node(&self, address: SocketAddr) -> TopologyResponse;
    /// Upsert (insert or update) a keyspace
    async fn upsert_keyspace(&self, keyspace_config: KeyspaceConfig) -> TopologyResponse;
    /// Remove a keyspace by its name
    async fn remove_keyspace(&self, keyspace_name: &str) -> TopologyResponse;
    /// Build/rebuild the ring to expose the current topology
    async fn build_ring(&self) -> TopologyResponse;
}

#[async_trait]
impl ClusterHandleExt for UnboundedHandle<ClusterEvent> {
    async fn add_node(&self, address: SocketAddr) -> TopologyResponse {
        let (tx, rx) = tokio::sync::oneshot::channel();
        let event = ClusterEvent::Topology(Topology::AddNode(address), Some(TopologyResponder::OneShot(tx)));
        self.send(event)
            .map_err(|_| TopologyErr::new(format!("Unable to add {} node, error: closed cluster handle", address)))?;
        rx.await.map_err(|_| {
            TopologyErr::new(format!(
                "Unable to add {} node, error: closed oneshot receiver",
                address
            ))
        })?
    }
    async fn upsert_keyspace(&self, keyspace_config: KeyspaceConfig) -> TopologyResponse {
        let (tx, rx) = tokio::sync::oneshot::channel();
        let event = ClusterEvent::Topology(
            Topology::UpsertKeyspace(keyspace_config.clone()),
            Some(TopologyResponder::OneShot(tx)),
        );
        self.send(event).map_err(|_| {
            TopologyErr::new(format!(
                "Unable to upsert/add keyspace {:?}, error: closed cluster handle",
                keyspace_config
            ))
        })?;
        rx.await.map_err(|_| {
            TopologyErr::new(format!(
                "Unable to upsert/add keyspace {:?}, error: closed oneshot receiver",
                keyspace_config
            ))
        })?
    }
    async fn remove_keyspace(&self, keyspace_name: &str) -> TopologyResponse {
        let (tx, rx) = tokio::sync::oneshot::channel();
        let event = ClusterEvent::Topology(
            Topology::RemoveKeyspace(keyspace_name.into()),
            Some(TopologyResponder::OneShot(tx)),
        );
        self.send(event).map_err(|_| {
            TopologyErr::new(format!(
                "Unable to remove keyspace {}, error: closed cluster handle",
                keyspace_name
            ))
        })?;
        rx.await.map_err(|_| {
            TopologyErr::new(format!(
                "Unable to remove keyspace {}, error: closed oneshot receiver",
                keyspace_name
            ))
        })?
    }
    async fn remove_node(&self, address: SocketAddr) -> TopologyResponse {
        let (tx, rx) = tokio::sync::oneshot::channel();
        let event = ClusterEvent::Topology(Topology::RemoveNode(address), Some(TopologyResponder::OneShot(tx)));
        self.send(event).map_err(|_| {
            TopologyErr::new(format!(
                "Unable to remove {} node, error: closed cluster handle",
                address
            ))
        })?;
        rx.await.map_err(|_| {
            TopologyErr::new(format!(
                "Unable to remove {} node, error: closed oneshot receiver",
                address
            ))
        })?
    }
    async fn build_ring(&self) -> TopologyResponse {
        let (tx, rx) = tokio::sync::oneshot::channel();
        let event = ClusterEvent::Topology(Topology::BuildRing, Some(TopologyResponder::OneShot(tx)));
        self.send(event)
            .map_err(|_| TopologyErr::new("Unable to build ring, error: closed cluster handle".to_string()))?;
        rx.await
            .map_err(|_| TopologyErr::new("Unable to build ring, error: closed oneshot receiver".to_string()))?
    }
}