nominal_api/conjure/clients/ingest/api/ingest_service.rs

use conjure_http::endpoint;
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService")]
pub trait IngestService<
    #[response_body]
    I: Iterator<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
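    // Note: `#[response_body]` marks the type parameter conjure-rust uses for
    // endpoints that stream raw binary response bodies. None of the endpoints
    // below return a binary body, so `I` is effectively unused here; it appears
    // to be emitted unconditionally by the code generator (an inference, not
    // something this file states).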
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated here.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This action is
    /// irreversible. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be
    /// recovered. Only use this endpoint if this is acceptable, if the data across files is disjoint, or if
    /// you're willing to re-ingest files to manually recover older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
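
// A minimal usage sketch (not part of the generated file). `IngestRequest` and
// `IngestResponse` below abbreviate the full `super::...::objects::ingest::api`
// paths used in the trait; the client value itself would come from whatever
// wrapper the `#[conjure_client]` macro generates (assumed, per the usual
// conjure-rust convention), so this is a sketch rather than a verified call site:
//
//     fn trigger_ingest<I, S>(
//         client: &S,
//         token: &conjure_object::BearerToken,
//         request: &IngestRequest,
//     ) -> Result<IngestResponse, conjure_http::private::Error>
//     where
//         I: Iterator<Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>>,
//         S: IngestService<I>,
//     {
//         client.ingest(token, request)
//     }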
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService")]
pub trait AsyncIngestService<
    #[response_body]
    I: conjure_http::private::Stream<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated here.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This action is
    /// irreversible. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be
    /// recovered. Only use this endpoint if this is acceptable, if the data across files is disjoint, or if
    /// you're willing to re-ingest files to manually recover older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
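
// The async counterpart of the sketch above (again, not part of the generated
// file); the only differences are the `Stream` bound taken from the trait
// definition and the `.await`. An async runtime such as tokio is assumed:
//
//     async fn trigger_ingest_async<I, S>(
//         client: &S,
//         token: &conjure_object::BearerToken,
//         request: &IngestRequest,
//     ) -> Result<IngestResponse, conjure_http::private::Error>
//     where
//         I: conjure_http::private::Stream<
//             Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
//         >,
//         S: AsyncIngestService<I>,
//     {
//         client.ingest(token, request).await
//     }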
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService", local)]
pub trait LocalAsyncIngestService<
    #[response_body]
    I: conjure_http::private::Stream<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated here.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This action is
    /// irreversible. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be
    /// recovered. Only use this endpoint if this is acceptable, if the data across files is disjoint, or if
    /// you're willing to re-ingest files to manually recover older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
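
// Note on the `local` flag above (an inference from the trait shape, not stated
// in this file): `LocalAsyncIngestService` matches `AsyncIngestService` except
// that its futures are not required to be `Send`, which suits single-threaded
// executors. A hedged sketch of driving such a client from tokio, where
// `client`, `token`, and `request` are hypothetical values built elsewhere:
//
//     let local = tokio::task::LocalSet::new();
//     local
//         .run_until(async {
//             // `run_until` executes a !Send future on the current thread.
//             let response = client.ingest(&token, &request).await?;
//             // ... inspect `response` ...
//             Ok::<_, conjure_http::private::Error>(())
//         })
//         .await?;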