#![cfg(feature = "default")]
use crate::api::init_api_server;

pub use crate::miner::Connection;
pub use crate::MinerList;
use async_std::net::TcpListener;
use async_std::sync::{Arc, Mutex};
use async_std::task;
use futures::io::BufReader;
use futures::io::{AsyncBufReadExt, AsyncReadExt, AsyncWriteExt};
use futures::StreamExt;
use log::info;
use serde::Deserialize;
use std::time::Duration;
use stratum_types::traits::{DataProvider, StratumManager, StratumParams};
use stratum_types::Result;

//@todo turn `new` into a "server-builder" style API.
#[derive(Clone)]
pub struct StratumServer<SM: StratumManager> {
    config: ServerConfig,
    data_provider: Arc<SM::DataProvider>,
    auth_manager: Arc<SM::AuthManager>,
    block_validator: Arc<SM::BlockValidator>,
    connection_list: Arc<MinerList<SM>>,
    //@todo I don't want this to be an option. We'll have to force defaults though.
    //@todo it's also not clear we need to store this at all: if we pass the data_provider to
    //every miner, each miner can pull the current job from that Arc directly.
    current_job: Arc<Mutex<Option<<SM::StratumParams as StratumParams>::Notify>>>,
}

//@todo put into builder
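/// Network settings for the stratum server.
///
/// Since this derives `Deserialize`, it can be read from any serde-compatible
/// format. A minimal sketch, assuming the `toml` crate and an illustrative
/// file layout (not something this crate ships):
///
/// ```ignore
/// let config: ServerConfig = toml::from_str(
///     r#"
///     host = "0.0.0.0"
///     port = 3333
///     max_connections = 1000
///     "#,
/// )?;
/// ```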
#[derive(Clone, Debug, Deserialize)]
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
    pub max_connections: Option<usize>,
}

impl<SM> StratumServer<SM>
where
    SM: StratumManager + 'static,
{
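    /// Construct a new server from its config and the manager's components.
    ///
    /// A rough usage sketch, assuming a `MyManager` type that implements
    /// `StratumManager`; the manager and its components are placeholders,
    /// not part of this crate:
    ///
    /// ```ignore
    /// let config = ServerConfig {
    ///     host: "0.0.0.0".to_string(),
    ///     port: 3333,
    ///     max_connections: None,
    /// };
    ///
    /// let server = StratumServer::<MyManager>::new(
    ///     config,
    ///     Arc::new(my_data_provider),
    ///     Arc::new(my_auth_manager),
    ///     Arc::new(my_block_validator),
    /// );
    ///
    /// server.start().await?;
    /// ```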
    pub fn new(
        config: ServerConfig,
        data_provider: Arc<SM::DataProvider>,
        auth_manager: Arc<SM::AuthManager>,
        block_validator: Arc<SM::BlockValidator>,
    ) -> Self {
        let connection_list = Arc::new(MinerList::new());
        let current_job = Arc::new(Mutex::new(None));

        StratumServer {
            connection_list,
            config,
            data_provider,
            auth_manager,
            block_validator,
            current_job,
        }
    }

    //Initialize the server before it starts accepting any connections.
    async fn init(&self) -> Result<()> {
        info!("Initializing...");
        self.init_data_provider().await?;
        info!("Data Provider Initialized");

        if cfg!(feature = "default") {
            init_api_server().await?;
            info!("API Server Initialized");
        }

        info!("Initialization Complete");
        Ok(())
    }

    async fn init_data_provider(&self) -> Result<()> {
        self.data_provider.init().await?;

        let data_provider = self.data_provider.clone();

        self.set_current_job(data_provider.get_job().await?).await;

        let connection_list = self.connection_list.clone();
        let self_clone = Arc::new(self.clone());

        //Spawn the data provider polling loop.
        task::spawn(async move {
            //@todo could (should?) use an interval here instead of sleeping each iteration.
            loop {
                task::sleep(Duration::from_secs(2)).await;
                //@todo these unwraps will abort the polling task if the data provider errors.
                if data_provider.poll_template().await.unwrap() {
                    let job = data_provider.get_job().await.unwrap();
                    self_clone.set_current_job(job.clone()).await;
                    connection_list.broadcast_new_job(job).await.unwrap();
                }
            }
        });

        Ok(())
    }

    async fn set_current_job(&self, job: <SM::StratumParams as StratumParams>::Notify) {
        *self.current_job.lock().await = Some(job);
    }

    pub async fn start(&self) -> Result<()> {
        self.init().await?;

        let listening_host = format!("{}:{}", self.config.host, self.config.port);

        let listener = TcpListener::bind(&listening_host).await?;
        let mut incoming = listener.incoming();

        info!("Listening on {}", listening_host);

        while let Some(stream) = incoming.next().await {
            let stream = stream?;
            let addr = stream.peer_addr()?;

            //If the connection comes through a proxy, the peer address above is the proxy's,
            //not the miner's. In that case we need to read the first line, parse the real peer
            //information from it, and then create the miner from that information (see the
            //parse_proxy_v1_header sketch below).
            let mut buf = String::new();
            let (rh, _wh) = stream.split();
            let mut buffer_stream = BufReader::new(rh);
            buffer_stream.read_line(&mut buf).await?;

            dbg!(buf);

            //let connection = Arc::new(Connection::new(
            //    stream,
            //    self.data_provider.clone(),
            //    self.auth_manager.clone(),
            //    self.block_validator.clone(),
            //));

            //self.connection_list.add_miner(connection.clone()).await?;

            //task::spawn(async move {
            //    info!("Accepting stream from: {}", addr);

            //    let new_connection = connection.clone();

            //    //Unused for now, but may be useful for logging or bans
            //    let _result = new_connection.start().await;

            //    //@todo remove this connection from the list of connections here, which probably
            //    //means passing in a mutex over the connection list.
            //    //@todo don't always drop a connection from the list immediately. We should keep
            //    //a cache of recent connections, since a dropped miner may want to reconnect with
            //    //the same ID. That also suggests indexing connections by ID rather than by IP.

            //    info!("Closing stream from: {}", addr);
            //});
        }
        Ok(())
    }
}
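
// A minimal sketch of parsing a HAProxy PROXY protocol v1 header line, for the
// proxied-connection case noted in `start()`. This assumes the upstream proxy
// speaks PROXY protocol v1 ("PROXY TCP4 <src> <dst> <srcport> <dstport>\r\n");
// the function is illustrative only and is not wired into the accept loop.
#[allow(dead_code)]
fn parse_proxy_v1_header(line: &str) -> Option<std::net::SocketAddr> {
    let mut parts = line.trim_end().split(' ');

    // The header must start with the literal "PROXY" keyword.
    if parts.next()? != "PROXY" {
        return None;
    }

    // "UNKNOWN" means the proxy could not determine the original peer.
    match parts.next()? {
        "TCP4" | "TCP6" => {}
        _ => return None,
    }

    // Remaining fields: source address, destination address, source port, destination port.
    let src_ip: std::net::IpAddr = parts.next()?.parse().ok()?;
    let _dst_ip = parts.next()?;
    let src_port: u16 = parts.next()?.parse().ok()?;

    Some(std::net::SocketAddr::new(src_ip, src_port))
}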