use std::collections::HashSet;
use std::fmt::Debug;

use chrono::prelude::*;
use mysql_async::{params, Row, Value};
use serde::{Deserialize, Serialize};

use nature_common::*;

use crate::models::define::*;
use crate::TaskDao;

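/// One persisted row of the task table: the serialized work item (`data`),
/// who should execute it (`task_for`), and the scheduling/retry bookkeeping.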
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq, Hash)]
pub struct RawTask {
    pub task_id: String,
    pub task_key: String,
    pub task_type: i8,
    pub task_for: String,
    pub task_state: i8,
    pub data: String,
    pub create_time: NaiveDateTime,
    pub execute_time: NaiveDateTime,
    pub retried_times: i16,
}

impl Default for RawTask {
    fn default() -> Self {
        RawTask {
            task_id: "".to_string(),
            task_key: "".to_string(),
            task_type: 0,
            task_for: "".to_string(),
            task_state: 0,
            data: "".to_string(),
            create_time: Local::now().naive_local(),
            execute_time: Local::now().naive_local(),
            retried_times: 0,
        }
    }
}

impl RawTask {
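    /// Serializes `task` to JSON and wraps it as a `RawTask` whose `task_id`
    /// is derived from the content. A minimal sketch (`MyTask` is a
    /// hypothetical `Serialize + Debug` type, and the key and meta strings
    /// are made up for illustration):
    ///
    /// ```ignore
    /// let raw = RawTask::new(&MyTask { num: 1 }, "B:sale/item:1", 1, "local")?;
    /// assert_eq!(0, raw.task_state);
    /// ```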
    pub fn new<T: Serialize + Debug>(task: &T, task_key: &str, task_type: i8, task_for: &str) -> Result<RawTask> {
        let json = serde_json::to_string(task)?;
        Self::from_str(&json, task_key, task_type, task_for)
    }

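    /// Like `new`, but takes the JSON payload directly; fails when the payload
    /// exceeds `TASK_CONTENT_MAX_LENGTH`. A hedged sketch (the payload and
    /// metas are made up for illustration):
    ///
    /// ```ignore
    /// let raw = RawTask::from_str(r#"{"num":1}"#, "B:sale/item:1", 1, "local")?;
    /// ```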
    pub fn from_str(json: &str, task_key: &str, task_type: i8, task_for: &str) -> Result<RawTask> {
        if json.len() > *TASK_CONTENT_MAX_LENGTH {
            return Err(NatureError::SystemError(format!("data's length can't exceed {}", *TASK_CONTENT_MAX_LENGTH)));
        }
        }
        let time = Local::now().naive_local();
        let id = format!("{}{}{}{}", json, task_key, task_for, task_type);
        Ok(RawTask {
            task_id: format!("{:x}", generate_id(&id)?),
            task_key: task_key.to_string(),
            task_type,
            task_for: task_for.to_string(),
            task_state: 0,
            data: json.to_string(),
            create_time: time,
            execute_time: time,
            retried_times: 0,
        })
    }

    /// For performance, a one-to-one carry: the initial carrier is reused to finish the whole
    /// flow, so no DB round-trip is needed to create a new carrier and delete the old one.
    /// The trade-offs: on failure the flow must be redone from the beginning (which should
    /// be rare), and any failure information is attached to that beginning carrier.
    pub fn finish_old<FI, FD>(&mut self, old: &RawTask, _dao_insert: FI, _dao_delete: FD) -> Result<usize>
        where FI: Fn(&RawTask) -> Result<usize>,
              FD: Fn(&[u8]) -> Result<usize>
    {
        // TODO: on error, if the delivery to finish differs from the new delivery,
        // the previous delivery must be finished and a new one created.
        self.task_id = old.task_id.clone(); // the id is used for final finished
        Ok(1)
    }

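    /// Inserts each new task, drops the ones the DB reports as duplicates
    /// (the retry process will re-drive them), then finishes the old task.
    /// A hedged sketch against a hypothetical `TaskDao` implementation `MockDao`:
    ///
    /// ```ignore
    /// let dao = MockDao::default();
    /// let mut news = vec![RawTask::default()];
    /// RawTask::save_batch(&mut news, "old-task-id", &dao).await?;
    /// ```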
    pub async fn save_batch<T>(news: &mut Vec<RawTask>, old_id: &str, task: &T) -> Result<()>
        where T: TaskDao
    {
        let mut will_deleted: HashSet<RawTask> = HashSet::new();
        for v in news.iter() {
            let num = task.insert(v).await?;
            // drop duplicated tasks to avoid data-consistency problems; retry.exe will pick them up
            if num != 1 {
                will_deleted.insert(v.clone());
            }
        }
        news.retain(|one| !will_deleted.contains(one));
        task.finish_task(old_id).await?;
        Ok(())
    }

    pub fn task_string(&self) -> String {
        format!("raw_task: key|type|for {}{}{}", self.task_key, self.task_type, self.task_for)
    }
}

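// Maps a result row to a `RawTask`; the SELECT must yield these nine columns
// in exactly this order, or `from_row` will panic.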
impl From<Row> for RawTask {
    fn from(row: Row) -> Self {
        let (task_id, task_key, task_type, task_for, task_state, data, create_time, execute_time, retried_times) = mysql_async::from_row(row);
        RawTask {
            task_id,
            task_key,
            task_type,
            task_for,
            task_state,
            data,
            create_time,
            execute_time,
            retried_times,
        }
    }
}

impl From<RawTask> for Vec<(String, Value)> {
    fn from(raw: RawTask) -> Self {
        params! {
            "task_id" => raw.task_id,
            "task_key" => raw.task_key,
            "task_type" => raw.task_type,
            "task_for" => raw.task_for,
            "task_state" => raw.task_state,
            "data" => raw.data,
            "create_time" => raw.create_time,
            "execute_time" => raw.execute_time,
            "retried_times" => raw.retried_times,
        }
    }
}
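
// A hedged sketch of how the conversion above is meant to be consumed
// (`conn` and the SQL text are assumptions, not part of this file):
//
//     let params: Vec<(String, Value)> = raw.into();
//     conn.exec_drop("INSERT INTO task (...) VALUES (...)", params).await?;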

#[cfg(test)]
mod test {
    use std::collections::HashSet;

    #[derive(Clone, Eq, Hash, PartialEq)]
    struct MyTest(String);

    #[test]
    #[ignore]
    fn vec_test() {
        let mut input: Vec<MyTest> = vec![
            MyTest("a".to_string()),
            MyTest("b".to_string()),
            MyTest("a".to_string()),
        ];
        let mut will_deleted: HashSet<MyTest> = HashSet::new();
        for v in input.iter() {
            if v.0 == "b" {
                will_deleted.insert(v.clone());
            }
        }
        input.retain(|one| !will_deleted.contains(one));
        assert_eq!(2, input.len());
    }
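
    // A hedged companion check: `RawTask::from_str` derives `task_id` from the
    // content, so identical inputs should produce the same id. This mirrors the
    // duplicate detection `save_batch` relies on (it assumes `generate_id`
    // hashes its input deterministically, as that logic implies).
    #[test]
    fn same_input_same_task_id() {
        use super::RawTask;
        let a = RawTask::from_str("{}", "key", 1, "for").unwrap();
        let b = RawTask::from_str("{}", "key", 1, "for").unwrap();
        assert_eq!(a.task_id, b.task_id);
    }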
}