// tag2upload_service_manager/db_data.rs

//! Database
//!
//! See `JobStatus` for the states of a job,
//! and progression through the db.
//!
//! # Coalescing
//!
//!  1. We don't do either of the following simultaneously in two jobs:
//!
//!      - work on the same objectid
//!      - work on the same repo URL
//!
//!     Implemented in [`JobInWorkflow::start`].
//!
//!  2. We don't try fetching the tag data for an objectid if we have
//!     other jobs for the same objectid which already have the tag data.
//!     What happens then depends on the other job's state
//!     (see the sketch after this list):
//!
//!     1. **Noticed / Queued / Building**:
//!       They'll eventually progress to a different state.
//!       This job should wait.
//!       Implemented in `<gitab::Forge1 as SomeForge>::make_progress`.
//!
//!     2. **Irrecoverable / Uploaded**:
//!       This job is (presumably) Noticed or Queued.
//!       It will be marked as a duplicate of that one,
//!       and will take on its status.
//!       Implemented in [`db_workflow::coalesce_completed`].
//!
//!     3. **Failed**:
//!       This job may proceed.
//!
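//! Here is a minimal sketch of the status-based decision in point 2.
//! It is illustrative only: the `Decision` enum, `coalesce_decision`,
//! and the way the sibling jobs are obtained are all hypothetical.
//!
//! ```ignore
//! /// Hypothetical outcome of the coalescing check for one job.
//! enum Decision {
//!     /// Another job will supply the tag data; check again later.
//!     Wait,
//!     /// Mark this job as a duplicate of the given job, copying its status.
//!     DuplicateOf(JobId),
//!     /// No usable sibling job; fetch the tag data ourselves.
//!     Proceed,
//! }
//!
//! /// `siblings`: the other jobs for the same objectid which already
//! /// hold the tag data.
//! fn coalesce_decision(siblings: &[JobRow]) -> Decision {
//!     use JobStatus::*;
//!     for other in siblings {
//!         match other.status {
//!             Noticed | Queued | Building => return Decision::Wait,
//!             Irrecoverable | Uploaded => return Decision::DuplicateOf(other.jid),
//!             // Failed (or NotForUs): not an obstacle; keep looking.
//!             _ => {}
//!         }
//!     }
//!     Decision::Proceed
//! }
//! ```
//!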
//! # Replay / manual retry
//!
//! We look at the timestamp (in the `tagger` line).
//! We don't accept tags more than `SLOP` in the future,
//! or more than `MAX_AGE` in the past.
//! We expire things from our database no earlier than `MAX_AGE`.
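//!
//! A rough sketch of that freshness check, assuming the tagger timestamp
//! has already been parsed into a `SystemTime` (the constant values shown
//! for `SLOP` and `MAX_AGE` are purely illustrative):
//!
//! ```ignore
//! use std::time::{Duration, SystemTime};
//!
//! // Illustrative values only; the real constants live elsewhere.
//! const SLOP: Duration = Duration::from_secs(10 * 60);
//! const MAX_AGE: Duration = Duration::from_secs(30 * 24 * 3600);
//!
//! fn tag_timestamp_acceptable(tagger_time: SystemTime, now: SystemTime) -> bool {
//!     // Not more than SLOP in the future ...
//!     tagger_time <= now + SLOP
//!     // ... and not more than MAX_AGE in the past.
//!         && tagger_time + MAX_AGE >= now
//! }
//! ```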
//!
//! If we get the same tag again (by git object id), it's ignored
//! (see Coalescing, above).
//! There is no facility for manually forcing a retry.
//! The uploader should use a new version number.
//!
//! # Forge up status
//!
//! TODO: forge up status is not implemented.
//!
//! We remember for each forge whether we think it's up.
//!
//! We start out thinking the forge is down.
//! When we think the forge is down, we do a preflight
//! check (an HTTPS request) to see if it seems to have come up.
//! We retry that check at increasing intervals.
//!
//! Whenever a job fails, we go back to thinking the forge is down.
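//!
//! One possible shape for that per-forge state. Since this is not
//! implemented yet, everything here, including the doubling backoff
//! policy and the one-hour cap, is an assumption:
//!
//! ```ignore
//! use std::time::{Duration, Instant};
//!
//! struct ForgeUpState {
//!     /// Do we currently believe the forge is up?
//!     believed_up: bool,
//!     /// When to next attempt a preflight check, if believed down.
//!     next_check: Instant,
//!     /// How long to wait after the next failed check (grows each time).
//!     retry_interval: Duration,
//! }
//!
//! impl ForgeUpState {
//!     /// Record a failed job or failed preflight check: forge is (still) down.
//!     fn note_failure(&mut self, now: Instant) {
//!         self.believed_up = false;
//!         self.next_check = now + self.retry_interval;
//!         // Back off, capped at an hour (illustrative values).
//!         self.retry_interval = (self.retry_interval * 2).min(Duration::from_secs(3600));
//!     }
//! }
//! ```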

use crate::prelude::*;

/// Guarantees:
///
/// * The configured repository, forge, and calling IP address were fine
/// * The tag name is plausible
#[derive(Debug, Clone)]
#[derive(Deftly)]
#[derive_deftly(FromSqlRow, AsBSqlRow, UpdateSqlRow, UiMap)]
pub struct JobData {
    pub repo_git_url: String,
    pub tag_objectid: GitObjectId,
    pub tag_name: String,
    pub forge_host: Hostname,
    pub forge_namever: ForgeNamever,
    #[deftly(ui(skip))]
    pub forge_data: ForgeData,
    // We don't use this operationally, but it's very useful for reporting
    #[deftly(bsql(flatten), ui(flatten))]
    pub tag_meta: t2umeta::Parsed,
}

#[derive(Debug, Deftly, Clone)]
#[derive_deftly(FromSqlRow, AsBSqlRow, UpdateSqlRow)]
#[derive_deftly(UiMap, UpdateWorkerReport)]
pub struct JobRow {
    #[deftly(bsql(rowid))]
    pub jid: JobId,
    #[deftly(bsql(flatten), ui(flatten))]
    pub data: JobData,
    pub received: TimeT,
    pub last_update: TimeT,
    #[deftly(ui(skip))]
    pub tag_data: NoneIsEmpty<TagObjectData>,
    #[deftly(worker_report)]
    pub status: JobStatus,
    pub processing: NoneIsEmpty<ProcessingInfo>,
    #[deftly(worker_report)]
    pub info: String,
    pub duplicate_of: Option<JobId>,
}

#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[derive(Deftly, strum::EnumIter)]
#[derive_deftly(FromSqlEnum, ToSqlEnum, UiDisplayEnum)]
pub enum JobStatus {
    /// Webhook request has been received and queued.
    ///
    /// The webhook details including the tag name look plausible.
    /// We're happy it's at the right forge etc.
    ///
    /// Next state is `Queued` or `NotForUs`.
    Noticed,

    /// Tag object has been fetched, job is ready for an Oracle worker
    ///
    /// `JobRow.tag_data` is nonempty, and
    /// `JobRow.tag_objectid` has been confirmed locally.
    Queued,

    /// Job has been given to an Oracle worker for processing.
    ///
    /// This state ought to be accompanied by a connection
    /// from the worker, through which we will get the outcome.
    /// If it isn't (e.g. after a restart), the job is irrecoverable.
    Building,

    /// Tag is *not* a (current) instruction to us
    NotForUs,

    /// Job has failed; other attempts for the same tag may work
    ///
    /// This is a possible next state from any of the other states.
    Failed,

    /// Job has failed; other attempts for the same tag are doomed
    ///
    /// This is a possible next state from any of the other states.
    Irrecoverable,

    /// Job has been completed successfully
    Uploaded,
}
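
// A sketch of a helper implied by the variant docs above: Noticed, Queued
// and Building can still progress, while the remaining states are final.
// Illustrative only; nothing else in this module relies on it.
impl JobStatus {
    /// Whether a job in this state will make no further progress.
    pub fn is_terminal(self) -> bool {
        use JobStatus::*;
        match self {
            Noticed | Queued | Building => false,
            NotForUs | Failed | Irrecoverable | Uploaded => true,
        }
    }
}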