//! nominal-api 0.1230.0
//!
//! API bindings for the Nominal platform.
//! Documentation: see the generated rustdoc for this crate.
// This file is @generated by prost-build.
/// Input to the file-ingest workflow: identifies the job, file, dataset, org, and
/// workspace, locates the raw bytes in object storage, and selects how the file is
/// ingested via the `ingest` oneof (log file vs. data file).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct IngestFileRequest {
    /// RID of the ingest job driving this request.
    #[prost(string, tag = "1")]
    pub ingest_job_rid: ::prost::alloc::string::String,
    /// Identifier of the dataset-file row this ingest targets.
    #[prost(string, tag = "2")]
    pub dataset_file_id: ::prost::alloc::string::String,
    /// RID of the dataset the file belongs to.
    #[prost(string, tag = "3")]
    pub dataset_rid: ::prost::alloc::string::String,
    /// RID of the owning organization.
    #[prost(string, tag = "4")]
    pub org_rid: ::prost::alloc::string::String,
    /// RID of the owning workspace.
    #[prost(string, tag = "5")]
    pub workspace_rid: ::prost::alloc::string::String,
    /// Object-storage handle pointing at the uploaded file's bytes.
    #[prost(message, optional, tag = "6")]
    pub handle: ::core::option::Option<super::super::types::object_storage::Handle>,
    /// Which column/field carries timestamps and how to parse them.
    #[prost(message, optional, tag = "7")]
    pub timestamp_metadata: ::core::option::Option<TimestampMetadata>,
    /// Extra tags attached to this ingest.
    /// NOTE(review): presumably applied to every sample produced from the file —
    /// confirm against the parser/writer that consumes this request.
    #[prost(map = "string, string", tag = "8")]
    pub additional_tags: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost::alloc::string::String,
    >,
    /// Time the dataset file row was created. Plumbed through the workflow onto every
    /// WriteFileDataRequest published to Kafka.
    /// (Tag 11 because tags 9 and 10 are claimed by the `ingest` oneof below.)
    #[prost(message, optional, tag = "11")]
    pub file_created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
    /// Selects log-file vs. data-file ingestion; `None` when the sender set neither.
    #[prost(oneof = "ingest_file_request::Ingest", tags = "9, 10")]
    pub ingest: ::core::option::Option<ingest_file_request::Ingest>,
}
/// Nested message and enum types in `IngestFileRequest`.
pub mod ingest_file_request {
    /// How the file should be ingested: as a single-channel log file or as a
    /// structured data file. Mirrors the `ingest` oneof (tags 9 and 10).
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Ingest {
        /// Line-delimited log-file ingestion (see `LogFileIngest`).
        #[prost(message, tag = "9")]
        LogIngest(super::LogFileIngest),
        /// Structured data-file ingestion (see `DataFileIngest`).
        #[prost(message, tag = "10")]
        DataIngest(super::DataFileIngest),
    }
}
/// Single-channel, line-delimited log files (e.g. JSON-L journal logs) where each line is one
/// sample for the same channel.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LogFileIngest {
    /// Name of the single channel every line of the file is written to.
    #[prost(string, tag = "1")]
    pub log_channel: ::prost::alloc::string::String,
}
/// Data files. The `shape` oneof picks the logical layout of the file: wide (row holds many
/// channels), long (row holds one sample), or batch (record holds arrays of samples for one
/// channel). Each shape's options carry their own format oneof so any of CSV/Parquet/Avro
/// can encode any shape. Format-specific parser config lives inside CsvOpts/ParquetOpts/
/// AvroOpts.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DataFileIngest {
    /// Per-channel units, keyed by channel name.
    /// NOTE(review): key/value semantics inferred from the field name — confirm.
    #[prost(map = "string, string", tag = "4")]
    pub units: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost::alloc::string::String,
    >,
    /// Optional prefix prepended to channel names derived from this file.
    /// NOTE(review): inferred from the name — verify against the parser.
    #[prost(string, optional, tag = "5")]
    pub channel_prefix: ::core::option::Option<::prost::alloc::string::String>,
    /// Logical layout of the file (wide/long/batch); `None` when unset by the sender.
    #[prost(oneof = "data_file_ingest::Shape", tags = "1, 2, 3")]
    pub shape: ::core::option::Option<data_file_ingest::Shape>,
}
/// Nested message and enum types in `DataFileIngest`.
pub mod data_file_ingest {
    /// Logical layout of the data file. Mirrors the `shape` oneof (tags 1–3);
    /// each variant carries the shape-specific parsing options.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Shape {
        /// One row holds one timestamp and many channels-as-columns.
        #[prost(message, tag = "1")]
        WideOpts(super::WideOpts),
        /// One row holds a single (channel, timestamp, value, tags) sample.
        #[prost(message, tag = "2")]
        LongOpts(super::LongOpts),
        /// One record holds one channel plus parallel arrays of timestamps/values.
        #[prost(message, tag = "3")]
        BatchOpts(super::BatchOpts),
    }
}
/// Wide format: each row (or Avro record) holds one timestamp and many channels-as-columns,
/// so a single row contributes one sample to each channel. Non-channel columns can be
/// declared as tag sources or excluded entirely.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WideOpts {
    /// Maps a tag name to the column/Avro-field whose value is that tag.
    #[prost(map = "string, string", tag = "4")]
    pub tag_columns: ::std::collections::HashMap<
        ::prost::alloc::string::String,
        ::prost::alloc::string::String,
    >,
    /// Columns/Avro-fields to skip entirely (neither channel nor tag).
    #[prost(string, repeated, tag = "5")]
    pub exclude_columns: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// File encoding (CSV/Parquet/Avro); `None` when unset by the sender.
    #[prost(oneof = "wide_opts::Format", tags = "1, 2, 3")]
    pub format: ::core::option::Option<wide_opts::Format>,
}
/// Nested message and enum types in `WideOpts`.
pub mod wide_opts {
    /// File encoding for the wide shape. Mirrors the `format` oneof (tags 1–3).
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Format {
        /// CSV-encoded file.
        #[prost(message, tag = "1")]
        CsvOpts(super::CsvOpts),
        /// Parquet-encoded file.
        #[prost(message, tag = "2")]
        ParquetOpts(super::ParquetOpts),
        /// Avro-encoded file.
        #[prost(message, tag = "3")]
        AvroOpts(super::AvroOpts),
    }
}
/// Long format: each row (or Avro record) is a single (channel, timestamp, value, tags) sample,
/// with channel name and value carried in dedicated columns. The timestamp column is supplied
/// by the top-level TimestampMetadata.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LongOpts {
    /// Column/field that carries the channel name for each row.
    #[prost(string, tag = "4")]
    pub channel_column: ::prost::alloc::string::String,
    /// Column/field that carries the sample value for each row.
    #[prost(string, tag = "5")]
    pub value_column: ::prost::alloc::string::String,
    /// Column/field that carries this row's tags as a serialized map (e.g. JSON object).
    #[prost(string, optional, tag = "6")]
    pub tags_column: ::core::option::Option<::prost::alloc::string::String>,
    /// File encoding (CSV/Parquet/Avro); `None` when unset by the sender.
    #[prost(oneof = "long_opts::Format", tags = "1, 2, 3")]
    pub format: ::core::option::Option<long_opts::Format>,
}
/// Nested message and enum types in `LongOpts`.
pub mod long_opts {
    /// File encoding for the long shape. Mirrors the `format` oneof (tags 1–3).
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Format {
        /// CSV-encoded file.
        #[prost(message, tag = "1")]
        CsvOpts(super::CsvOpts),
        /// Parquet-encoded file.
        #[prost(message, tag = "2")]
        ParquetOpts(super::ParquetOpts),
        /// Avro-encoded file.
        #[prost(message, tag = "3")]
        AvroOpts(super::AvroOpts),
    }
}
/// Batch / record-of-arrays: each record carries one channel plus parallel arrays of timestamps
/// and values. Avro today; CSV/Parquet record-format encodings can be added later.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BatchOpts {
    /// Field paths into the record that locate each piece of the batch.
    #[prost(string, tag = "2")]
    pub channel_field: ::prost::alloc::string::String,
    /// Field path to the array of timestamps (parallel to `values_field`).
    #[prost(string, tag = "3")]
    pub timestamps_field: ::prost::alloc::string::String,
    /// Field path to the array of values (parallel to `timestamps_field`).
    #[prost(string, tag = "4")]
    pub values_field: ::prost::alloc::string::String,
    /// Optional field path to the record's tags.
    #[prost(string, optional, tag = "5")]
    pub tags_field: ::core::option::Option<::prost::alloc::string::String>,
    /// File encoding; only Avro today (tag 1). `None` when unset by the sender.
    #[prost(oneof = "batch_opts::Format", tags = "1")]
    pub format: ::core::option::Option<batch_opts::Format>,
}
/// Nested message and enum types in `BatchOpts`.
pub mod batch_opts {
    /// File encoding for the batch shape. Single-variant today (Avro only);
    /// the oneof leaves room to add CSV/Parquet record encodings later.
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Format {
        /// Avro-encoded file.
        #[prost(message, tag = "1")]
        AvroOpts(super::AvroOpts),
    }
}
/// Pairs the column that holds the timestamp with the parser that interprets its values.
/// The parser variants live in nominal/types/time/timestamp_parsers.proto so other surfaces
/// (procedures, registry, etc.) share the same wire format.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TimestampMetadata {
    /// Column/field in the file that holds the timestamp. The caller is responsible for supplying
    /// this — for Avro that means the schema-fixed timestamp field name.
    #[prost(string, tag = "1")]
    pub column: ::prost::alloc::string::String,
    /// Parser/type used to interpret the timestamp values (shared TimestampType message).
    /// (`r#type` because `type` is a Rust keyword.)
    #[prost(message, optional, tag = "2")]
    pub r#type: ::core::option::Option<super::super::types::time::TimestampType>,
}
/// CSV format marker. Currently carries no settings; the message exists so
/// CSV-specific parser options can be added later without a wire-format change.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct CsvOpts {}
/// Parquet format marker. Currently carries no settings; the message exists so
/// Parquet-specific parser options can be added later without a wire-format change.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct ParquetOpts {}
/// Avro format marker. Currently carries no settings; the message exists so
/// Avro-specific parser options can be added later without a wire-format change.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct AvroOpts {}
/// Request to record a file's ingest-status transition (parsing, ingesting, or error)
/// for a given dataset file within an ingest job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SetDatasetFileIngestStatusRequest {
    /// Identifier of the dataset-file row whose status is being set.
    #[prost(string, tag = "1")]
    pub dataset_file_id: ::prost::alloc::string::String,
    /// RID of the dataset the file belongs to.
    #[prost(string, tag = "2")]
    pub dataset_rid: ::prost::alloc::string::String,
    /// RID of the ingest job reporting the status. (Tag 6: added after the
    /// status oneof claimed tags 3–5.)
    #[prost(string, tag = "6")]
    pub ingest_job_rid: ::prost::alloc::string::String,
    /// Refinery only drives parsing, ingesting, and error transitions; other IngestStatusV2 variants
    /// are set elsewhere. The empty payloads exist so we can grow them later (e.g.
    /// Parsing.bytes_processed) without changing the activity signature.
    #[prost(oneof = "set_dataset_file_ingest_status_request::Status", tags = "3, 4, 5")]
    pub status: ::core::option::Option<set_dataset_file_ingest_status_request::Status>,
}
/// Nested message and enum types in `SetDatasetFileIngestStatusRequest`.
pub mod set_dataset_file_ingest_status_request {
    /// Refinery only drives parsing, ingesting, and error transitions; other IngestStatusV2 variants
    /// are set elsewhere. The empty payloads exist so we can grow them later (e.g.
    /// Parsing.bytes_processed) without changing the activity signature.
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Status {
        /// File is being parsed (payload currently empty).
        #[prost(message, tag = "3")]
        Parsing(super::Parsing),
        /// Parsed data is being written; carries the observed time bounds.
        #[prost(message, tag = "4")]
        Ingesting(super::Ingesting),
        /// Ingest failed; carries an error type and message.
        #[prost(message, tag = "5")]
        Error(super::Error),
    }
}
/// Empty acknowledgement for `SetDatasetFileIngestStatusRequest`; fields can be
/// added later without a wire-format change.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct SetDatasetFileIngestStatusResponse {}
/// Status payload for the parsing transition. Deliberately empty today so fields
/// (e.g. bytes_processed) can be added without changing the activity signature.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Parsing {}
/// Status payload for the ingesting transition.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct Ingesting {
    /// Time range covered by the data being ingested.
    /// NOTE(review): inferred from the field name and type — confirm whether these
    /// are the bounds produced by parsing (cf. ParseFileResponse.bounds).
    #[prost(message, optional, tag = "1")]
    pub bounds: ::core::option::Option<super::super::types::time::Range>,
}
/// Status payload for a failed ingest: a machine-readable error type plus a
/// human-readable message.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Error {
    /// Machine-readable classification of the failure.
    #[prost(string, tag = "1")]
    pub error_type: ::prost::alloc::string::String,
    /// Human-readable description of the failure.
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
}
/// Request to parse one data file: identifies the job/file/dataset/workspace and
/// carries the `DataFileIngest` options describing how to read the file.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ParseFileRequest {
    /// RID of the ingest job driving this parse.
    #[prost(string, tag = "1")]
    pub ingest_job_rid: ::prost::alloc::string::String,
    /// Identifier of the dataset-file row being parsed.
    #[prost(string, tag = "2")]
    pub dataset_file_id: ::prost::alloc::string::String,
    /// RID of the dataset the file belongs to.
    #[prost(string, tag = "3")]
    pub dataset_rid: ::prost::alloc::string::String,
    /// Shape/format/options for reading the data file.
    #[prost(message, optional, tag = "4")]
    pub data_ingest: ::core::option::Option<DataFileIngest>,
    /// RID of the owning workspace.
    #[prost(string, tag = "5")]
    pub workspace_rid: ::prost::alloc::string::String,
}
/// Parses the workflow input into staged object-storage batches.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ParseFileResponse {
    /// Batches of parsed data staged in object storage, ready to be published.
    #[prost(message, repeated, tag = "1")]
    pub staged_batches: ::prost::alloc::vec::Vec<StagedBatch>,
    /// Time range observed across the parsed data, when available.
    #[prost(message, optional, tag = "2")]
    pub bounds: ::core::option::Option<super::super::types::time::Range>,
}
/// Publishes one WriteFileDataRequest to Kafka per staged batch, keyed by dataset_rid.
/// Each batch's handle points at the staged payload in object storage.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WriteFileBatchesToKafkaRequest {
    /// Staged batches to publish (typically ParseFileResponse.staged_batches).
    #[prost(message, repeated, tag = "1")]
    pub staged_batches: ::prost::alloc::vec::Vec<StagedBatch>,
    /// RID of the ingest job driving this publish.
    #[prost(string, tag = "2")]
    pub ingest_job_rid: ::prost::alloc::string::String,
    /// RID of the file being published.
    /// NOTE(review): distinct from `dataset_file_id` below — confirm which
    /// identifier each downstream consumer expects.
    #[prost(string, tag = "3")]
    pub file_rid: ::prost::alloc::string::String,
    /// RID of the owning organization.
    #[prost(string, tag = "4")]
    pub org_rid: ::prost::alloc::string::String,
    /// Identifier of the dataset-file row the batches came from.
    #[prost(string, tag = "5")]
    pub dataset_file_id: ::prost::alloc::string::String,
    /// RID of the dataset; used as the Kafka message key (see message doc).
    #[prost(string, tag = "6")]
    pub dataset_rid: ::prost::alloc::string::String,
    /// Creation time of the dataset file row, stamped onto every published
    /// WriteFileDataRequest (see IngestFileRequest.file_created_at).
    #[prost(message, optional, tag = "7")]
    pub file_created_at: ::core::option::Option<
        super::super::super::google::protobuf::Timestamp,
    >,
}
/// One parsed batch staged in object storage, plus the format its payload is
/// encoded in.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StagedBatch {
    /// Sequence number of this batch within the parsed file.
    /// NOTE(review): inferred from the name — confirm ordering guarantees.
    #[prost(int32, tag = "1")]
    pub batch_id: i32,
    /// Object-storage handle pointing at the staged batch payload.
    #[prost(message, optional, tag = "2")]
    pub handle: ::core::option::Option<super::super::types::object_storage::Handle>,
    /// Encoding of the staged payload, as a raw `BatchFormat` enum value.
    /// Stored as i32 per prost convention; decode via the generated accessor
    /// rather than comparing the integer directly.
    #[prost(
        enumeration = "super::super::direct_channel_writer::v2::BatchFormat",
        tag = "3"
    )]
    pub format: i32,
}
/// Empty acknowledgement for `WriteFileBatchesToKafkaRequest`; fields can be
/// added later without a wire-format change.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct WriteFileBatchesToKafkaResponse {}