nominal-api 0.1239.0

API bindings for the Nominal platform
use conjure_http::endpoint;
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService")]
pub trait IngestService<
    #[response_body]
    I: Iterator<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated to it.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, the data across files is disjoint, or you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
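// Example (a minimal sketch): a blocking call to the `ingest` endpoint.
// `IngestServiceClient` is assumed to be the client type that the
// `conjure_client` macro generates for the trait above, constructed over any
// raw HTTP client implementing `conjure_http::client::Client`.
fn trigger_ingest_example<C>(
    raw_client: C,
    token: &conjure_object::BearerToken,
    request: &super::super::super::super::objects::ingest::api::IngestRequest,
) -> Result<
    super::super::super::super::objects::ingest::api::IngestResponse,
    conjure_http::private::Error,
>
where
    C: conjure_http::client::Client,
{
    // The generated client implements `IngestService`, so this is an
    // ordinary trait-method call.
    let client = IngestServiceClient::new(raw_client);
    client.ingest(token, request)
}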
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService")]
pub trait AsyncIngestService<
    #[response_body]
    I: conjure_http::private::Stream<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated to it.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, the data across files is disjoint, or you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
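// Example (a minimal sketch): deleting a dataset file with the async client.
// `AsyncIngestServiceClient` is assumed to be the type generated for
// `AsyncIngestService`. Deletion is best-effort and irreversible; see the
// warning on `delete_file` above.
async fn remove_file_example<C>(
    raw_client: C,
    token: &conjure_object::BearerToken,
    dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
    file_id: conjure_object::Uuid,
) -> Result<(), conjure_http::private::Error>
where
    C: conjure_http::client::AsyncClient,
{
    let client = AsyncIngestServiceClient::new(raw_client);
    client.delete_file(token, dataset_rid, file_id).await
}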
/// The Ingest Service handles data ingestion into Nominal/ClickHouse.
#[conjure_http::conjure_client(name = "IngestService", local)]
pub trait LocalAsyncIngestService<
    #[response_body]
    I: conjure_http::private::Stream<
            Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
        >,
> {
    /// Triggers an ingest job, either creating a new dataset or uploading to an existing one.
    /// This endpoint is meant to supersede all other ingestion endpoints as their
    /// functionality is migrated to it.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest",
        name = "ingest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        trigger_ingest: &super::super::super::super::objects::ingest::api::IngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Triggers an ingest job using an existing ingest job RID.
    /// Returns the same response format as the /ingest endpoint.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/re-ingest",
        name = "rerunIngest",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn rerun_ingest(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestResponse,
        conjure_http::private::Error,
    >;
    /// Creates a run and ingests data sources to be added to the run.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-run",
        name = "ingestRun",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_run(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::IngestRunRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestRunResponse,
        conjure_http::private::Error,
    >;
    /// Ingests video data from an S3 Nominal upload bucket.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/ingest-video",
        name = "ingestVideo",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn ingest_video(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        ingest_video: &super::super::super::super::objects::ingest::api::IngestVideoRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::IngestVideoResponse,
        conjure_http::private::Error,
    >;
    /// Re-ingests data from the provided source datasets into either an existing target dataset or a new one.
    /// Only supported for CSV and Parquet dataset files.
    /// Only dataset files are re-ingested; streaming data in the source datasets is dropped.
    #[endpoint(
        method = POST,
        path = "/ingest/v1/reingest-dataset-files",
        name = "reingestFromDatasets",
        accept = conjure_http::client::StdResponseDeserializer
    )]
    async fn reingest_from_datasets(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[body(serializer = conjure_http::client::StdRequestSerializer)]
        request: &super::super::super::super::objects::ingest::api::ReingestDatasetsRequest,
    ) -> Result<
        super::super::super::super::objects::ingest::api::ReingestDatasetsResponse,
        conjure_http::private::Error,
    >;
    /// This is a best-effort deletion of the file's data based on the ingestedAt timestamp. This is an irreversible
    /// action. Only v2 dataset file deletion is supported.
    /// !!!WARNING!!!
    /// It's possible that the file has overwritten points, in which case those older points will not be recovered.
    /// Only use this endpoint if this is acceptable, the data across files is disjoint, or you're willing to
    /// re-ingest files to manually recover the older points.
    #[endpoint(
        method = DELETE,
        path = "/ingest/v1/delete-file/{datasetRid}/file/{fileId}",
        name = "deleteFile",
        accept = conjure_http::client::conjure::EmptyResponseDeserializer
    )]
    async fn delete_file(
        &self,
        #[auth]
        auth_: &conjure_object::BearerToken,
        #[path(
            name = "datasetRid",
            encoder = conjure_http::client::conjure::PlainEncoder
        )]
        dataset_rid: &super::super::super::super::objects::api::rids::DatasetRid,
        #[path(name = "fileId", encoder = conjure_http::client::conjure::PlainEncoder)]
        file_id: conjure_object::Uuid,
    ) -> Result<(), conjure_http::private::Error>;
}
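// Example (a minimal sketch): callers can also stay generic over the trait
// itself, which works with the generated client or a hand-written test
// double. Shown against the local (non-Send) variant; the same pattern
// applies to the other two traits.
async fn rerun_ingest_example<S, I>(
    service: &S,
    token: &conjure_object::BearerToken,
    request: &super::super::super::super::objects::ingest::api::RerunIngestRequest,
) -> Result<
    super::super::super::super::objects::ingest::api::IngestResponse,
    conjure_http::private::Error,
>
where
    S: LocalAsyncIngestService<I>,
    I: conjure_http::private::Stream<
        Item = Result<conjure_http::private::Bytes, conjure_http::private::Error>,
    >,
{
    // Re-runs an existing ingest job by RID; the response format matches the
    // /ingest endpoint.
    service.rerun_ingest(token, request).await
}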