
nominal_api/conjure/endpoints/ingest/api/ingest_service.rs

use conjure_http::endpoint;
/// The Ingest Service handles data ingestion into Nominal/Clickhouse.
#[conjure_http::conjure_endpoints(
    name = "IngestService",
    use_legacy_error_serialization
)]
pub trait IngestService {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their functionality
    /// gets migrated to this one.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    fn ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "triggerIngest"
        )]
        trigger_ingest: super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    fn rerun_ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer, safe)]
        request: super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        produces = conjure_http::server::StdResponseSerializer
    )]
    fn ingest_run(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        produces = conjure_http::server::StdResponseSerializer
    )]
    fn ingest_video(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "ingestVideo"
        )]
        ingest_video: super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        produces = conjure_http::server::StdResponseSerializer
    )]
    fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, if the data across files is disjoint, or if you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile"
    )]
    fn delete_file(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "datasetRid",
            safe
        )]
        dataset_rid: super::super::super::super::objects::api::rids::DatasetRid,
        #[path(
            name = "fileId",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "fileId"
        )]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
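// ---------------------------------------------------------------------------------------
// Illustrative sketch, not part of the generated output: how calling code might invoke a
// concrete `IngestService` implementation. Only the trait and its method signatures come
// from this file; the helper name and its parameters are hypothetical, and a real caller
// would build the `IngestRequest` from the types in `objects::ingest::api`.
#[allow(dead_code)]
fn trigger_ingest_and_cleanup<S: IngestService>(
    service: &S,
    token: conjure_object::BearerToken,
    request: super::super::super::super::objects::ingest::api::IngestRequest,
    dataset_rid: super::super::super::super::objects::api::rids::DatasetRid,
    stale_file_id: conjure_object::Uuid,
) -> Result<
    super::super::super::super::objects::ingest::api::IngestResponse,
    conjure_http::private::Error,
> {
    // Kick off an ingest job; the response describes the created or updated dataset.
    let response = service.ingest(token.clone(), request)?;
    // Best-effort, irreversible removal of a previously ingested file (see deleteFile above).
    service.delete_file(token, dataset_rid, stale_file_id)?;
    Ok(response)
}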
/// The Ingest Service handles data ingestion into Nominal/Clickhouse.
#[conjure_http::conjure_endpoints(
    name = "IngestService",
    use_legacy_error_serialization
)]
pub trait AsyncIngestService {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their functionality
    /// gets migrated to this one.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "triggerIngest"
        )]
        trigger_ingest: super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer, safe)]
        request: super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "ingestVideo"
        )]
        ingest_video: super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, if the data across files is disjoint, or if you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile"
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "datasetRid",
            safe
        )]
        dataset_rid: super::super::super::super::objects::api::rids::DatasetRid,
        #[path(
            name = "fileId",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "fileId"
        )]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
/// The Ingest Service handles data ingestion into Nominal/Clickhouse.
#[conjure_http::conjure_endpoints(
    name = "IngestService",
    use_legacy_error_serialization,
    local
)]
pub trait LocalAsyncIngestService {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their functionality
    /// gets migrated to this one.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "triggerIngest"
        )]
        trigger_ingest: super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer, safe)]
        request: super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(
            deserializer = conjure_http::server::StdRequestDeserializer,
            log_as = "ingestVideo"
        )]
        ingest_video: super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        produces = conjure_http::server::StdResponseSerializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[body(deserializer = conjure_http::server::StdRequestDeserializer)]
        request: super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, if the data across files is disjoint, or if you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile"
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "datasetRid",
            safe
        )]
        dataset_rid: super::super::super::super::objects::api::rids::DatasetRid,
        #[path(
            name = "fileId",
            decoder = conjure_http::server::conjure::FromPlainDecoder,
            log_as = "fileId"
        )]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
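// ---------------------------------------------------------------------------------------
// Illustrative sketch, not part of the generated output: the raw HTTP shape of the
// `ingest` endpoint declared above. The POST method, the /ingest/v1/ingest path, and the
// bearer-token auth come from the `#[endpoint]` and `#[auth]` attributes; the base URL,
// the `reqwest` (blocking + json features) and `serde_json` dependencies, and the empty
// JSON body are assumptions. A real request must carry a valid `IngestRequest` payload as
// defined in `objects::ingest::api`.
#[allow(dead_code)]
fn example_raw_ingest_call(base_url: &str, token: &str) -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::blocking::Client::new();
    let response = client
        .post(format!("{base_url}/ingest/v1/ingest"))
        .bearer_auth(token)
        // Placeholder body: the actual IngestRequest fields are not shown in this file.
        .json(&serde_json::json!({}))
        .send()?;
    println!("ingest returned HTTP {}", response.status());
    Ok(())
}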