google_cloudevents/google/events/cloud/dataflow/v1beta3/mod.rs
// This file is @generated by prost-build.
/// Describes the environment in which a Dataflow Job runs.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Environment {
/// The prefix of the resources the system should use for temporary
/// storage. The system will append the suffix "/temp-{JOBNAME}" to
/// this resource prefix, where {JOBNAME} is the value of the
/// job_name field. The resulting bucket and object prefix is used
/// as the prefix of the resources used to store temporary data
/// needed during the job execution. NOTE: This will override the
/// value in taskrunner_settings.
/// The supported resource type is:
///
/// Google Cloud Storage:
///
/// storage.googleapis.com/{bucket}/{object}
/// bucket.storage.googleapis.com/{object}
#[prost(string, tag = "1")]
pub temp_storage_prefix: ::prost::alloc::string::String,
/// The type of cluster manager API to use. If unknown or
/// unspecified, the service will attempt to choose a reasonable
/// default. This should be in the form of the API service name,
/// e.g. "compute.googleapis.com".
#[prost(string, tag = "2")]
pub cluster_manager_api_service: ::prost::alloc::string::String,
/// The list of experiments to enable. This field should be used for SDK
/// related experiments and not for service related experiments. The proper
/// field for service related experiments is service_options.
#[prost(string, repeated, tag = "3")]
pub experiments: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// The list of service options to enable. This field should be used for
/// service related experiments only. These experiments, when graduating to GA,
/// should be replaced by dedicated fields or become default (i.e. always on).
#[prost(string, repeated, tag = "16")]
pub service_options: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// If set, contains the Cloud KMS key identifier used to encrypt data
/// at rest, AKA a Customer Managed Encryption Key (CMEK).
///
/// Format:
/// projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY
#[prost(string, tag = "12")]
pub service_kms_key_name: ::prost::alloc::string::String,
/// The worker pools. At least one "harness" worker pool must be
/// specified in order for the job to have workers.
#[prost(message, repeated, tag = "4")]
pub worker_pools: ::prost::alloc::vec::Vec<WorkerPool>,
/// A description of the process that generated the request.
#[prost(message, optional, tag = "5")]
pub user_agent: ::core::option::Option<::prost_types::Struct>,
/// A structure describing which components of the service, and which
/// versions of those components, are required in order to run the job.
#[prost(message, optional, tag = "6")]
pub version: ::core::option::Option<::prost_types::Struct>,
/// The dataset for the current project where various workflow
/// related tables are stored.
///
/// The supported resource type is:
///
/// Google BigQuery:
/// bigquery.googleapis.com/{dataset}
#[prost(string, tag = "7")]
pub dataset: ::prost::alloc::string::String,
/// The Cloud Dataflow SDK pipeline options specified by the user. These
/// options are passed through the service and are used to recreate the
/// SDK pipeline options on the worker in a language agnostic and platform
/// independent way.
#[prost(message, optional, tag = "8")]
pub sdk_pipeline_options: ::core::option::Option<::prost_types::Struct>,
/// Identity to run virtual machines as. Defaults to the default account.
#[prost(string, tag = "10")]
pub service_account_email: ::prost::alloc::string::String,
/// Which Flexible Resource Scheduling mode to run in.
#[prost(enumeration = "FlexResourceSchedulingGoal", tag = "11")]
pub flex_resource_scheduling_goal: i32,
/// The Compute Engine region
/// (<https://cloud.google.com/compute/docs/regions-zones/regions-zones>) in
/// which worker processing should occur, e.g. "us-west1". Mutually exclusive
/// with worker_zone. If neither worker_region nor worker_zone is specified,
/// default to the control plane's region.
#[prost(string, tag = "13")]
pub worker_region: ::prost::alloc::string::String,
/// The Compute Engine zone
/// (<https://cloud.google.com/compute/docs/regions-zones/regions-zones>) in
/// which worker processing should occur, e.g. "us-west1-a". Mutually exclusive
/// with worker_region. If neither worker_region nor worker_zone is specified,
/// a zone in the control plane's region is chosen based on available capacity.
#[prost(string, tag = "14")]
pub worker_zone: ::prost::alloc::string::String,
/// Output only. The shuffle mode used for the job.
#[prost(enumeration = "ShuffleMode", tag = "15")]
pub shuffle_mode: i32,
/// Any debugging options to be supplied to the job.
#[prost(message, optional, tag = "17")]
pub debug_options: ::core::option::Option<DebugOptions>,
}
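// Hedged sketch: constructing an `Environment` by hand. prost stores enum
// fields as raw `i32`, so enum values are written with an `as i32` cast and
// all unset fields fall back to `Default::default()`. The bucket and account
// names below are placeholders; this function is illustrative and not part
// of the generated API.
#[allow(dead_code)]
fn example_environment() -> Environment {
    Environment {
        temp_storage_prefix: "storage.googleapis.com/example-bucket/tmp".into(),
        service_account_email: "worker@example-project.iam.gserviceaccount.com".into(),
        flex_resource_scheduling_goal: FlexResourceSchedulingGoal::FlexrsCostOptimized as i32,
        ..Default::default()
    }
}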
/// The packages that must be installed in order for a worker to run the
/// steps of the Cloud Dataflow job that will be assigned to its worker
/// pool.
///
/// This is the mechanism by which the Cloud Dataflow SDK causes code to
/// be loaded onto the workers. For example, the Cloud Dataflow Java SDK
/// might use this to install jars containing the user's code and all of the
/// various dependencies (libraries, data files, etc.) required in order
/// for that code to run.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Package {
/// The name of the package.
#[prost(string, tag = "1")]
pub name: ::prost::alloc::string::String,
/// The resource to read the package from. The supported resource type is:
///
/// Google Cloud Storage:
///
/// storage.googleapis.com/{bucket}
/// bucket.storage.googleapis.com/
#[prost(string, tag = "2")]
pub location: ::prost::alloc::string::String,
}
/// Settings for WorkerPool autoscaling.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct AutoscalingSettings {
/// The algorithm to use for autoscaling.
#[prost(enumeration = "AutoscalingAlgorithm", tag = "1")]
pub algorithm: i32,
/// The maximum number of workers to cap scaling at.
#[prost(int32, tag = "2")]
pub max_num_workers: i32,
}
/// Defines an SDK harness container for executing Dataflow pipelines.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SdkHarnessContainerImage {
/// A docker container image that resides in Google Container Registry.
#[prost(string, tag = "1")]
pub container_image: ::prost::alloc::string::String,
/// If true, recommends that the Dataflow service use only one core per SDK
/// container instance with this image. If false (or unset), recommends using
/// more than one core per SDK container instance with this image for
/// efficiency. Note that the Dataflow service may choose to override this
/// property if needed.
#[prost(bool, tag = "2")]
pub use_single_core_per_container: bool,
/// Environment ID for the Beam runner API proto Environment that corresponds
/// to the current SDK Harness.
#[prost(string, tag = "3")]
pub environment_id: ::prost::alloc::string::String,
/// The set of capabilities enumerated in the above Environment proto. See also
/// [beam_runner_api.proto](<https://github.com/apache/beam/blob/master/model/pipeline/src/main/proto/org/apache/beam/model/pipeline/v1/beam_runner_api.proto>)
#[prost(string, repeated, tag = "4")]
pub capabilities: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// Describes one particular pool of Cloud Dataflow workers to be
/// instantiated by the Cloud Dataflow service in order to perform the
/// computations required by a job. Note that a workflow job may use
/// multiple pools, in order to match the various computational
/// requirements of the various stages of the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkerPool {
/// The kind of the worker pool; currently only `harness` and `shuffle`
/// are supported.
#[prost(string, tag = "1")]
pub kind: ::prost::alloc::string::String,
/// Number of Google Compute Engine workers in this pool needed to
/// execute the job. If zero or unspecified, the service will
/// attempt to choose a reasonable default.
#[prost(int32, tag = "2")]
pub num_workers: i32,
/// Packages to be installed on workers.
#[prost(message, repeated, tag = "3")]
pub packages: ::prost::alloc::vec::Vec<Package>,
/// The default package set to install. This allows the service to
/// select a default set of packages which are useful to worker
/// harnesses written in a particular language.
#[prost(enumeration = "DefaultPackageSet", tag = "4")]
pub default_package_set: i32,
/// Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
/// service will attempt to choose a reasonable default.
#[prost(string, tag = "5")]
pub machine_type: ::prost::alloc::string::String,
/// Sets the policy for determining when to turn down the worker pool.
/// Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
/// `TEARDOWN_NEVER`.
/// `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
/// the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
/// if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
/// down.
///
/// If the workers are not torn down by the service, they will
/// continue to run and use Google Compute Engine VM resources in the
/// user's project until they are explicitly terminated by the user.
/// Because of this, Google recommends using the `TEARDOWN_ALWAYS`
/// policy except for small, manually supervised test jobs.
///
/// If unknown or unspecified, the service will attempt to choose a reasonable
/// default.
#[prost(enumeration = "TeardownPolicy", tag = "6")]
pub teardown_policy: i32,
/// Size of root disk for VMs, in GB. If zero or unspecified, the service will
/// attempt to choose a reasonable default.
#[prost(int32, tag = "7")]
pub disk_size_gb: i32,
/// Type of root disk for VMs. If empty or unspecified, the service will
/// attempt to choose a reasonable default.
#[prost(string, tag = "16")]
pub disk_type: ::prost::alloc::string::String,
/// Fully qualified source image for disks.
#[prost(string, tag = "8")]
pub disk_source_image: ::prost::alloc::string::String,
/// Zone to run the worker pools in. If empty or unspecified, the service
/// will attempt to choose a reasonable default.
#[prost(string, tag = "9")]
pub zone: ::prost::alloc::string::String,
/// The action to take on host maintenance, as defined by the Google
/// Compute Engine API.
#[prost(string, tag = "11")]
pub on_host_maintenance: ::prost::alloc::string::String,
/// Metadata to set on the Google Compute Engine VMs.
#[prost(map = "string, string", tag = "13")]
pub metadata:
::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
/// Settings for autoscaling of this WorkerPool.
#[prost(message, optional, tag = "14")]
pub autoscaling_settings: ::core::option::Option<AutoscalingSettings>,
/// Network to which VMs will be assigned. If empty or unspecified,
/// the service will use the network "default".
#[prost(string, tag = "17")]
pub network: ::prost::alloc::string::String,
/// Subnetwork to which VMs will be assigned, if desired. Expected to be of
/// the form "regions/REGION/subnetworks/SUBNETWORK".
#[prost(string, tag = "19")]
pub subnetwork: ::prost::alloc::string::String,
/// Required. Docker container image that executes the Cloud Dataflow worker
/// harness, residing in Google Container Registry.
///
/// Deprecated for the Fn API path. Use sdk_harness_container_images instead.
#[prost(string, tag = "18")]
pub worker_harness_container_image: ::prost::alloc::string::String,
/// The number of threads per worker harness. If empty or unspecified, the
/// service will choose a number of threads (according to the number of cores
/// on the selected machine type for batch, or 1 by convention for streaming).
#[prost(int32, tag = "20")]
pub num_threads_per_worker: i32,
/// Configuration for VM IPs.
#[prost(enumeration = "WorkerIpAddressConfiguration", tag = "21")]
pub ip_configuration: i32,
/// Set of SDK harness containers needed to execute this pipeline. This will
/// only be set in the Fn API path. For non-cross-language pipelines this
/// should have only one entry. Cross-language pipelines will have two or more
/// entries.
#[prost(message, repeated, tag = "22")]
pub sdk_harness_container_images: ::prost::alloc::vec::Vec<SdkHarnessContainerImage>,
}
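// Hedged sketch: a minimal "harness" worker pool with basic autoscaling.
// The machine type and worker counts are illustrative values; per the doc
// comments above, the service substitutes defaults for anything left at
// zero or empty. Illustrative only, not part of the generated API.
#[allow(dead_code)]
fn example_worker_pool() -> WorkerPool {
    WorkerPool {
        kind: "harness".into(),
        num_workers: 3,
        machine_type: "n1-standard-1".into(),
        autoscaling_settings: Some(AutoscalingSettings {
            algorithm: AutoscalingAlgorithm::Basic as i32,
            max_num_workers: 10,
        }),
        ..Default::default()
    }
}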
/// Describes any options that have an effect on the debugging of pipelines.
#[derive(Clone, Copy, PartialEq, ::prost::Message)]
pub struct DebugOptions {
/// When true, enables the logging of the literal hot key to the user's Cloud
/// Logging.
#[prost(bool, tag = "1")]
pub enable_hot_key_logging: bool,
}
/// Defines a job to be run by the Cloud Dataflow service. Do not enter
/// confidential information when you supply string values using the API.
/// Fields stripped from source Job proto:
/// - steps
/// - pipeline_description
/// - transform_name_mapping
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Job {
/// The unique ID of this job.
///
/// This field is set by the Cloud Dataflow service when the Job is
/// created, and is immutable for the life of the job.
#[prost(string, tag = "1")]
pub id: ::prost::alloc::string::String,
/// The ID of the Cloud Platform project that the job belongs to.
#[prost(string, tag = "2")]
pub project_id: ::prost::alloc::string::String,
/// The user-specified Cloud Dataflow job name.
///
/// Only one Job with a given name can exist in a project within one region at
/// any given time. Jobs in different regions can have the same name.
/// If a caller attempts to create a Job with the same
/// name as an already-existing Job, the attempt returns the
/// existing Job.
///
/// The name must match the regular expression
/// `[a-z](\[-a-z0-9\]{0,1022}\[a-z0-9\])?`
#[prost(string, tag = "3")]
pub name: ::prost::alloc::string::String,
/// The type of Cloud Dataflow job.
#[prost(enumeration = "JobType", tag = "4")]
pub r#type: i32,
/// The environment for the job.
#[prost(message, optional, tag = "5")]
pub environment: ::core::option::Option<Environment>,
/// The Cloud Storage location where the steps are stored.
#[prost(string, tag = "24")]
pub steps_location: ::prost::alloc::string::String,
/// The current state of the job.
///
/// Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
/// specified.
///
/// A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
/// terminal state. After a job has reached a terminal state, no
/// further state updates may be made.
///
/// This field may be mutated by the Cloud Dataflow service;
/// callers cannot mutate it.
#[prost(enumeration = "JobState", tag = "7")]
pub current_state: i32,
/// The timestamp associated with the current state.
#[prost(message, optional, tag = "8")]
pub current_state_time: ::core::option::Option<::prost_types::Timestamp>,
/// The job's requested state.
///
/// `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
/// `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
/// also be used to directly set a job's requested state to
/// `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
/// job if it has not already reached a terminal state.
#[prost(enumeration = "JobState", tag = "9")]
pub requested_state: i32,
/// Deprecated.
#[prost(message, optional, tag = "10")]
pub execution_info: ::core::option::Option<JobExecutionInfo>,
/// The timestamp when the job was initially created. Immutable and set by the
/// Cloud Dataflow service.
#[prost(message, optional, tag = "11")]
pub create_time: ::core::option::Option<::prost_types::Timestamp>,
/// If this job is an update of an existing job, this field is the job ID
/// of the job it replaced.
///
/// When sending a `CreateJobRequest`, you can update a job by specifying it
/// here. The job named here is stopped, and its intermediate state is
/// transferred to this job.
#[prost(string, tag = "12")]
pub replace_job_id: ::prost::alloc::string::String,
/// The client's unique identifier of the job, re-used across retried attempts.
/// If this field is set, the service will ensure its uniqueness.
/// The request to create a job will fail if the service has knowledge of a
/// previously submitted job with the same client's ID and job name.
/// The caller may use this field to ensure idempotence of job
/// creation across retried attempts to create a job.
/// By default, the field is empty and, in that case, the service ignores it.
#[prost(string, tag = "14")]
pub client_request_id: ::prost::alloc::string::String,
/// If another job is an update of this job (and thus, this job is in
/// `JOB_STATE_UPDATED`), this field contains the ID of that job.
#[prost(string, tag = "15")]
pub replaced_by_job_id: ::prost::alloc::string::String,
/// A set of files the system should be aware of that are used
/// for temporary storage. These temporary files will be
/// removed on job completion.
/// No duplicates are allowed.
/// No file patterns are supported.
///
/// The supported files are:
///
/// Google Cloud Storage:
///
/// storage.googleapis.com/{bucket}/{object}
/// bucket.storage.googleapis.com/{object}
#[prost(string, repeated, tag = "16")]
pub temp_files: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
/// User-defined labels for this job.
///
/// The labels map can contain no more than 64 entries. Entries of the labels
/// map are UTF8 strings that comply with the following restrictions:
///
/// * Keys must conform to regexp: [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
/// * Values must conform to regexp: \[\p{Ll}\p{Lo}\p{N}_-\]{0,63}
/// * Both keys and values are additionally constrained to be <= 128 bytes in
/// size.
#[prost(map = "string, string", tag = "17")]
pub labels:
::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>,
/// The \[regional endpoint\]
/// (<https://cloud.google.com/dataflow/docs/concepts/regional-endpoints>) that
/// contains this job.
#[prost(string, tag = "18")]
pub location: ::prost::alloc::string::String,
/// This field may be mutated by the Cloud Dataflow service;
/// callers cannot mutate it.
#[prost(message, repeated, tag = "20")]
pub stage_states: ::prost::alloc::vec::Vec<ExecutionStageState>,
/// This field is populated by the Dataflow service to support filtering jobs
/// by the metadata values provided here. Populated for ListJobs and all GetJob
/// views SUMMARY and higher.
#[prost(message, optional, tag = "21")]
pub job_metadata: ::core::option::Option<JobMetadata>,
/// The timestamp when the job was started (transitioned to JOB_STATE_PENDING).
/// Flexible resource scheduling jobs are started with some delay after job
/// creation, so start_time is unset before start and is updated when the
/// job is started by the Cloud Dataflow service. For other jobs, start_time
/// always equals create_time and is immutable and set by the Cloud Dataflow
/// service.
#[prost(message, optional, tag = "22")]
pub start_time: ::core::option::Option<::prost_types::Timestamp>,
/// If this is specified, the job's initial state is populated from the given
/// snapshot.
#[prost(string, tag = "23")]
pub created_from_snapshot_id: ::prost::alloc::string::String,
/// Reserved for future use. This field is set only in responses from the
/// server; it is ignored if it is set in any requests.
#[prost(bool, tag = "25")]
pub satisfies_pzs: bool,
}
/// Metadata for a Datastore connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DatastoreIoDetails {
/// Namespace used in the connection.
#[prost(string, tag = "1")]
pub namespace: ::prost::alloc::string::String,
/// ProjectId accessed in the connection.
#[prost(string, tag = "2")]
pub project_id: ::prost::alloc::string::String,
}
/// Metadata for a Pub/Sub connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PubSubIoDetails {
/// Topic accessed in the connection.
#[prost(string, tag = "1")]
pub topic: ::prost::alloc::string::String,
/// Subscription used in the connection.
#[prost(string, tag = "2")]
pub subscription: ::prost::alloc::string::String,
}
/// Metadata for a File connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FileIoDetails {
/// File Pattern used to access files by the connector.
#[prost(string, tag = "1")]
pub file_pattern: ::prost::alloc::string::String,
}
/// Metadata for a Cloud Bigtable connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BigTableIoDetails {
/// ProjectId accessed in the connection.
#[prost(string, tag = "1")]
pub project_id: ::prost::alloc::string::String,
/// InstanceId accessed in the connection.
#[prost(string, tag = "2")]
pub instance_id: ::prost::alloc::string::String,
/// TableId accessed in the connection.
#[prost(string, tag = "3")]
pub table_id: ::prost::alloc::string::String,
}
/// Metadata for a BigQuery connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BigQueryIoDetails {
/// Table accessed in the connection.
#[prost(string, tag = "1")]
pub table: ::prost::alloc::string::String,
/// Dataset accessed in the connection.
#[prost(string, tag = "2")]
pub dataset: ::prost::alloc::string::String,
/// Project accessed in the connection.
#[prost(string, tag = "3")]
pub project_id: ::prost::alloc::string::String,
/// Query used to access data in the connection.
#[prost(string, tag = "4")]
pub query: ::prost::alloc::string::String,
}
/// Metadata for a Spanner connector used by the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SpannerIoDetails {
/// ProjectId accessed in the connection.
#[prost(string, tag = "1")]
pub project_id: ::prost::alloc::string::String,
/// InstanceId accessed in the connection.
#[prost(string, tag = "2")]
pub instance_id: ::prost::alloc::string::String,
/// DatabaseId accessed in the connection.
#[prost(string, tag = "3")]
pub database_id: ::prost::alloc::string::String,
}
/// The version of the SDK used to run the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SdkVersion {
/// The version of the SDK used to run the job.
#[prost(string, tag = "1")]
pub version: ::prost::alloc::string::String,
/// A readable string describing the version of the SDK.
#[prost(string, tag = "2")]
pub version_display_name: ::prost::alloc::string::String,
/// The support status for this SDK version.
#[prost(enumeration = "sdk_version::SdkSupportStatus", tag = "3")]
pub sdk_support_status: i32,
}
/// Nested message and enum types in `SdkVersion`.
pub mod sdk_version {
/// The support status of the SDK used to run the job.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SdkSupportStatus {
/// Cloud Dataflow is unaware of this version.
Unknown = 0,
/// This is a known version of an SDK, and is supported.
Supported = 1,
/// A newer version of the SDK family exists, and an update is recommended.
Stale = 2,
/// This version of the SDK is deprecated and will eventually be
/// unsupported.
Deprecated = 3,
/// Support for this SDK version has ended and it should no longer be used.
Unsupported = 4,
}
impl SdkSupportStatus {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "UNKNOWN",
Self::Supported => "SUPPORTED",
Self::Stale => "STALE",
Self::Deprecated => "DEPRECATED",
Self::Unsupported => "UNSUPPORTED",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"UNKNOWN" => Some(Self::Unknown),
"SUPPORTED" => Some(Self::Supported),
"STALE" => Some(Self::Stale),
"DEPRECATED" => Some(Self::Deprecated),
"UNSUPPORTED" => Some(Self::Unsupported),
_ => None,
}
}
}
}
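// Hedged sketch: round-tripping an enum through its proto string names using
// the generated `as_str_name`/`from_str_name` helpers shown above.
#[allow(dead_code)]
fn example_sdk_support_status_roundtrip() {
    let status = sdk_version::SdkSupportStatus::Stale;
    let name = status.as_str_name(); // "STALE"
    assert_eq!(
        sdk_version::SdkSupportStatus::from_str_name(name),
        Some(status)
    );
}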
/// Metadata available primarily for filtering jobs. Will be included in the
/// ListJob response and Job SUMMARY view.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobMetadata {
/// The SDK version used to run the job.
#[prost(message, optional, tag = "1")]
pub sdk_version: ::core::option::Option<SdkVersion>,
/// Identification of a Spanner source used in the Dataflow job.
#[prost(message, repeated, tag = "2")]
pub spanner_details: ::prost::alloc::vec::Vec<SpannerIoDetails>,
/// Identification of a BigQuery source used in the Dataflow job.
#[prost(message, repeated, tag = "3")]
pub bigquery_details: ::prost::alloc::vec::Vec<BigQueryIoDetails>,
/// Identification of a Cloud Bigtable source used in the Dataflow job.
#[prost(message, repeated, tag = "4")]
pub big_table_details: ::prost::alloc::vec::Vec<BigTableIoDetails>,
/// Identification of a Pub/Sub source used in the Dataflow job.
#[prost(message, repeated, tag = "5")]
pub pubsub_details: ::prost::alloc::vec::Vec<PubSubIoDetails>,
/// Identification of a File source used in the Dataflow job.
#[prost(message, repeated, tag = "6")]
pub file_details: ::prost::alloc::vec::Vec<FileIoDetails>,
/// Identification of a Datastore source used in the Dataflow job.
#[prost(message, repeated, tag = "7")]
pub datastore_details: ::prost::alloc::vec::Vec<DatastoreIoDetails>,
}
/// A message describing the state of a particular execution stage.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExecutionStageState {
/// The name of the execution stage.
#[prost(string, tag = "1")]
pub execution_stage_name: ::prost::alloc::string::String,
/// Execution stage states allow the same set of values as JobState.
#[prost(enumeration = "JobState", tag = "2")]
pub execution_stage_state: i32,
/// The time at which the stage transitioned to this state.
#[prost(message, optional, tag = "3")]
pub current_state_time: ::core::option::Option<::prost_types::Timestamp>,
}
/// Additional information about how a Cloud Dataflow job will be executed that
/// isn't contained in the submitted job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobExecutionInfo {
/// A mapping from each stage to the information about that stage.
#[prost(map = "string, message", tag = "1")]
pub stages: ::std::collections::HashMap<::prost::alloc::string::String, JobExecutionStageInfo>,
}
/// Contains information about how a particular
/// [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be
/// executed.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobExecutionStageInfo {
/// The steps associated with the execution stage.
/// Note that stages may have several steps, and that a given step
/// might be run by more than one stage.
#[prost(string, repeated, tag = "1")]
pub step_name: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// The data within all Job events.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobEventData {
/// The Job event payload.
#[prost(message, optional, tag = "1")]
pub payload: ::core::option::Option<Job>,
}
/// Specifies the processing model used by a
/// \[google.dataflow.v1beta3.Job\], which determines the way the Job is
/// managed by the Cloud Dataflow service (how workers are scheduled, how
/// inputs are sharded, etc).
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum JobType {
/// The type of the job is unspecified, or unknown.
Unknown = 0,
/// A batch job with a well-defined end point: data is read, data is
/// processed, data is written, and the job is done.
Batch = 1,
/// A continuously streaming job with no end: data is read,
/// processed, and written continuously.
Streaming = 2,
}
impl JobType {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "JOB_TYPE_UNKNOWN",
Self::Batch => "JOB_TYPE_BATCH",
Self::Streaming => "JOB_TYPE_STREAMING",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"JOB_TYPE_UNKNOWN" => Some(Self::Unknown),
"JOB_TYPE_BATCH" => Some(Self::Batch),
"JOB_TYPE_STREAMING" => Some(Self::Streaming),
_ => None,
}
}
}
/// Specifies the resource to optimize for in Flexible Resource Scheduling.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum FlexResourceSchedulingGoal {
/// Run in the default mode.
FlexrsUnspecified = 0,
/// Optimize for lower execution time.
FlexrsSpeedOptimized = 1,
/// Optimize for lower cost.
FlexrsCostOptimized = 2,
}
impl FlexResourceSchedulingGoal {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::FlexrsUnspecified => "FLEXRS_UNSPECIFIED",
Self::FlexrsSpeedOptimized => "FLEXRS_SPEED_OPTIMIZED",
Self::FlexrsCostOptimized => "FLEXRS_COST_OPTIMIZED",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"FLEXRS_UNSPECIFIED" => Some(Self::FlexrsUnspecified),
"FLEXRS_SPEED_OPTIMIZED" => Some(Self::FlexrsSpeedOptimized),
"FLEXRS_COST_OPTIMIZED" => Some(Self::FlexrsCostOptimized),
_ => None,
}
}
}
/// Specifies what happens to a resource when a Cloud Dataflow
/// [google.dataflow.v1beta3.Job][google.dataflow.v1beta3.Job] has completed.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum TeardownPolicy {
/// The teardown policy isn't specified, or is unknown.
Unknown = 0,
/// Always teardown the resource.
TeardownAlways = 1,
/// Teardown the resource on success. This is useful for debugging
/// failures.
TeardownOnSuccess = 2,
/// Never teardown the resource. This is useful for debugging and
/// development.
TeardownNever = 3,
}
impl TeardownPolicy {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "TEARDOWN_POLICY_UNKNOWN",
Self::TeardownAlways => "TEARDOWN_ALWAYS",
Self::TeardownOnSuccess => "TEARDOWN_ON_SUCCESS",
Self::TeardownNever => "TEARDOWN_NEVER",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"TEARDOWN_POLICY_UNKNOWN" => Some(Self::Unknown),
"TEARDOWN_ALWAYS" => Some(Self::TeardownAlways),
"TEARDOWN_ON_SUCCESS" => Some(Self::TeardownOnSuccess),
"TEARDOWN_NEVER" => Some(Self::TeardownNever),
_ => None,
}
}
}
/// The default set of packages to be staged on a pool of workers.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DefaultPackageSet {
/// The default set of packages to stage is unknown, or unspecified.
Unknown = 0,
/// Indicates that no packages should be staged at the worker unless
/// explicitly specified by the job.
None = 1,
/// Stage packages typically useful to workers written in Java.
Java = 2,
/// Stage packages typically useful to workers written in Python.
Python = 3,
}
impl DefaultPackageSet {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "DEFAULT_PACKAGE_SET_UNKNOWN",
Self::None => "DEFAULT_PACKAGE_SET_NONE",
Self::Java => "DEFAULT_PACKAGE_SET_JAVA",
Self::Python => "DEFAULT_PACKAGE_SET_PYTHON",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"DEFAULT_PACKAGE_SET_UNKNOWN" => Some(Self::Unknown),
"DEFAULT_PACKAGE_SET_NONE" => Some(Self::None),
"DEFAULT_PACKAGE_SET_JAVA" => Some(Self::Java),
"DEFAULT_PACKAGE_SET_PYTHON" => Some(Self::Python),
_ => None,
}
}
}
/// Specifies the algorithm used to determine the number of worker
/// processes to run at any given point in time, based on the amount of
/// data left to process, the number of workers, and how quickly
/// existing workers are processing data.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum AutoscalingAlgorithm {
/// The algorithm is unknown, or unspecified.
Unknown = 0,
/// Disable autoscaling.
None = 1,
/// Increase worker count over time to reduce job execution time.
Basic = 2,
}
impl AutoscalingAlgorithm {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "AUTOSCALING_ALGORITHM_UNKNOWN",
Self::None => "AUTOSCALING_ALGORITHM_NONE",
Self::Basic => "AUTOSCALING_ALGORITHM_BASIC",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"AUTOSCALING_ALGORITHM_UNKNOWN" => Some(Self::Unknown),
"AUTOSCALING_ALGORITHM_NONE" => Some(Self::None),
"AUTOSCALING_ALGORITHM_BASIC" => Some(Self::Basic),
_ => None,
}
}
}
/// Specifies how IP addresses should be allocated to the worker machines.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum WorkerIpAddressConfiguration {
/// The configuration is unknown, or unspecified.
WorkerIpUnspecified = 0,
/// Workers should have public IP addresses.
WorkerIpPublic = 1,
/// Workers should have private IP addresses.
WorkerIpPrivate = 2,
}
impl WorkerIpAddressConfiguration {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::WorkerIpUnspecified => "WORKER_IP_UNSPECIFIED",
Self::WorkerIpPublic => "WORKER_IP_PUBLIC",
Self::WorkerIpPrivate => "WORKER_IP_PRIVATE",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"WORKER_IP_UNSPECIFIED" => Some(Self::WorkerIpUnspecified),
"WORKER_IP_PUBLIC" => Some(Self::WorkerIpPublic),
"WORKER_IP_PRIVATE" => Some(Self::WorkerIpPrivate),
_ => None,
}
}
}
/// Specifies the shuffle mode used by a
/// \[google.dataflow.v1beta3.Job\], which determines how data is shuffled
/// during processing. For more details, see:
/// <https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#dataflow-shuffle>
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ShuffleMode {
/// Shuffle mode information is not available.
Unspecified = 0,
/// Shuffle is done on the worker VMs.
VmBased = 1,
/// Shuffle is done on the service side.
ServiceBased = 2,
}
impl ShuffleMode {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unspecified => "SHUFFLE_MODE_UNSPECIFIED",
Self::VmBased => "VM_BASED",
Self::ServiceBased => "SERVICE_BASED",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"SHUFFLE_MODE_UNSPECIFIED" => Some(Self::Unspecified),
"VM_BASED" => Some(Self::VmBased),
"SERVICE_BASED" => Some(Self::ServiceBased),
_ => None,
}
}
}
/// Describes the overall state of a
/// [google.dataflow.v1beta3.Job][google.dataflow.v1beta3.Job].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum JobState {
/// The job's run state isn't specified.
Unknown = 0,
/// `JOB_STATE_STOPPED` indicates that the job has not
/// yet started to run.
Stopped = 1,
/// `JOB_STATE_RUNNING` indicates that the job is currently running.
Running = 2,
/// `JOB_STATE_DONE` indicates that the job has successfully completed.
/// This is a terminal job state. This state may be set by the Cloud Dataflow
/// service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a
/// Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal
/// state.
Done = 3,
/// `JOB_STATE_FAILED` indicates that the job has failed. This is a
/// terminal job state. This state may only be set by the Cloud Dataflow
/// service, and only as a transition from `JOB_STATE_RUNNING`.
Failed = 4,
/// `JOB_STATE_CANCELLED` indicates that the job has been explicitly
/// cancelled. This is a terminal job state. This state may only be
/// set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
/// yet reached another terminal state.
Cancelled = 5,
/// `JOB_STATE_UPDATED` indicates that the job was successfully updated,
/// meaning that this job was stopped and another job was started, inheriting
/// state from this one. This is a terminal job state. This state may only be
/// set by the Cloud Dataflow service, and only as a transition from
/// `JOB_STATE_RUNNING`.
Updated = 6,
/// `JOB_STATE_DRAINING` indicates that the job is in the process of draining.
/// A draining job has stopped pulling from its input sources and is processing
/// any data that remains in-flight. This state may be set via a Cloud Dataflow
/// `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
/// that are draining may only transition to `JOB_STATE_DRAINED`,
/// `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
Draining = 7,
/// `JOB_STATE_DRAINED` indicates that the job has been drained.
/// A drained job terminated by stopping pulling from its input sources and
/// processing any data that remained in-flight when draining was requested.
/// This state is a terminal state, may only be set by the Cloud Dataflow
/// service, and only as a transition from `JOB_STATE_DRAINING`.
Drained = 8,
/// `JOB_STATE_PENDING` indicates that the job has been created but is not yet
/// running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
/// or `JOB_STATE_FAILED`.
Pending = 9,
/// `JOB_STATE_CANCELLING` indicates that the job has been explicitly cancelled
/// and is in the process of stopping. Jobs that are cancelling may only
/// transition to `JOB_STATE_CANCELLED` or `JOB_STATE_FAILED`.
Cancelling = 10,
/// `JOB_STATE_QUEUED` indicates that the job has been created but is being
/// delayed until launch. Jobs that are queued may only transition to
/// `JOB_STATE_PENDING` or `JOB_STATE_CANCELLED`.
Queued = 11,
/// `JOB_STATE_RESOURCE_CLEANING_UP` indicates that the batch job's associated
/// resources are currently being cleaned up after a successful run.
/// Currently, this is an opt-in feature; please reach out to the Cloud
/// support team if you are interested.
ResourceCleaningUp = 12,
}
impl JobState {
/// String value of the enum field names used in the ProtoBuf definition.
///
/// The values are not transformed in any way and thus are considered stable
/// (if the ProtoBuf definition does not change) and safe for programmatic use.
pub fn as_str_name(&self) -> &'static str {
match self {
Self::Unknown => "JOB_STATE_UNKNOWN",
Self::Stopped => "JOB_STATE_STOPPED",
Self::Running => "JOB_STATE_RUNNING",
Self::Done => "JOB_STATE_DONE",
Self::Failed => "JOB_STATE_FAILED",
Self::Cancelled => "JOB_STATE_CANCELLED",
Self::Updated => "JOB_STATE_UPDATED",
Self::Draining => "JOB_STATE_DRAINING",
Self::Drained => "JOB_STATE_DRAINED",
Self::Pending => "JOB_STATE_PENDING",
Self::Cancelling => "JOB_STATE_CANCELLING",
Self::Queued => "JOB_STATE_QUEUED",
Self::ResourceCleaningUp => "JOB_STATE_RESOURCE_CLEANING_UP",
}
}
/// Creates an enum from field names used in the ProtoBuf definition.
pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
match value {
"JOB_STATE_UNKNOWN" => Some(Self::Unknown),
"JOB_STATE_STOPPED" => Some(Self::Stopped),
"JOB_STATE_RUNNING" => Some(Self::Running),
"JOB_STATE_DONE" => Some(Self::Done),
"JOB_STATE_FAILED" => Some(Self::Failed),
"JOB_STATE_CANCELLED" => Some(Self::Cancelled),
"JOB_STATE_UPDATED" => Some(Self::Updated),
"JOB_STATE_DRAINING" => Some(Self::Draining),
"JOB_STATE_DRAINED" => Some(Self::Drained),
"JOB_STATE_PENDING" => Some(Self::Pending),
"JOB_STATE_CANCELLING" => Some(Self::Cancelling),
"JOB_STATE_QUEUED" => Some(Self::Queued),
"JOB_STATE_RESOURCE_CLEANING_UP" => Some(Self::ResourceCleaningUp),
_ => None,
}
}
}
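// Hedged sketch: interpreting the raw `i32` stored in `Job::current_state`.
// Recent prost releases derive `TryFrom<i32>` for enumerations (an assumption
// about the prost version this file was generated against); if the wire value
// is unknown to this build, we fall back to `JobState::Unknown` rather than
// failing. The set of terminal states follows the doc comments above.
#[allow(dead_code)]
fn example_is_terminal(job: &Job) -> bool {
    let state = JobState::try_from(job.current_state).unwrap_or(JobState::Unknown);
    matches!(
        state,
        JobState::Done
            | JobState::Failed
            | JobState::Cancelled
            | JobState::Updated
            | JobState::Drained
    )
}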
/// The CloudEvent raised when a Job status changes.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobStatusChangedEvent {
/// The data associated with the event.
#[prost(message, optional, tag = "1")]
pub data: ::core::option::Option<JobEventData>,
}
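// Hedged sketch: decoding the CloudEvent payload from protobuf bytes. The
// `Message::decode` call comes from the prost runtime this file is generated
// against; error handling is left to the caller. Illustrative only.
#[allow(dead_code)]
fn example_decode_event(bytes: &[u8]) -> Result<JobStatusChangedEvent, ::prost::DecodeError> {
    use ::prost::Message;
    JobStatusChangedEvent::decode(bytes)
}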