#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SecretMount {
    /// Name must be the name of the secret in kubernetes.
    #[prost(string, tag="1")]
    pub name: std::string::String,
    /// Key of the secret to load into env_var; this field only has meaning if env_var != "".
    #[prost(string, tag="4")]
    pub key: std::string::String,
    #[prost(string, tag="2")]
    pub mount_path: std::string::String,
    #[prost(string, tag="3")]
    pub env_var: std::string::String,
}
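// Illustrative sketch (not part of the generated code): a SecretMount can either be
// mounted as files at a path or injected as an environment variable. The secret
// name, key, and values below are hypothetical.
pub fn example_secret_mounts() -> (SecretMount, SecretMount) {
    // Mount the whole Kubernetes secret as files under /secrets.
    let as_files = SecretMount {
        name: "my-k8s-secret".to_string(),
        mount_path: "/secrets".to_string(),
        ..Default::default()
    };
    // Expose a single key of the secret as the MY_TOKEN environment variable.
    let as_env_var = SecretMount {
        name: "my-k8s-secret".to_string(),
        key: "token".to_string(),
        env_var: "MY_TOKEN".to_string(),
        ..Default::default()
    };
    (as_files, as_env_var)
}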
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Transform {
    #[prost(string, tag="1")]
    pub image: std::string::String,
    #[prost(string, repeated, tag="2")]
    pub cmd: ::std::vec::Vec<std::string::String>,
    #[prost(string, repeated, tag="13")]
    pub err_cmd: ::std::vec::Vec<std::string::String>,
    #[prost(map="string, string", tag="3")]
    pub env: ::std::collections::HashMap<std::string::String, std::string::String>,
    #[prost(message, repeated, tag="4")]
    pub secrets: ::std::vec::Vec<SecretMount>,
    #[prost(string, repeated, tag="9")]
    pub image_pull_secrets: ::std::vec::Vec<std::string::String>,
    #[prost(string, repeated, tag="5")]
    pub stdin: ::std::vec::Vec<std::string::String>,
    #[prost(string, repeated, tag="14")]
    pub err_stdin: ::std::vec::Vec<std::string::String>,
    #[prost(int64, repeated, tag="6")]
    pub accept_return_code: ::std::vec::Vec<i64>,
    #[prost(bool, tag="7")]
    pub debug: bool,
    #[prost(string, tag="10")]
    pub user: std::string::String,
    #[prost(string, tag="11")]
    pub working_dir: std::string::String,
    #[prost(string, tag="12")]
    pub dockerfile: std::string::String,
    #[prost(message, optional, tag="15")]
    pub build: ::std::option::Option<BuildSpec>,
}
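// Illustrative sketch (not generated): a minimal Transform, roughly what a pipeline
// spec's "transform" section deserializes into. The image and command are
// hypothetical placeholders.
pub fn example_transform() -> Transform {
    Transform {
        image: "python:3.8".to_string(),
        cmd: vec!["python3".to_string(), "/app/main.py".to_string()],
        ..Default::default()
    }
}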
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BuildSpec {
    #[prost(string, tag="1")]
    pub path: std::string::String,
    #[prost(string, tag="2")]
    pub language: std::string::String,
    #[prost(string, tag="3")]
    pub image: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TfJob {
    /// tf_job is a serialized Kubeflow TFJob spec. Pachyderm sends this directly
    /// to a kubernetes cluster on which kubeflow has been installed, instead of
    /// creating a pipeline ReplicationController as it normally would.
    #[prost(string, tag="1")]
    pub tf_job: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Egress {
    #[prost(string, tag="1")]
    pub url: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Job {
    #[prost(string, tag="1")]
    pub id: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metadata {
    #[prost(map="string, string", tag="1")]
    pub annotations: ::std::collections::HashMap<std::string::String, std::string::String>,
    #[prost(map="string, string", tag="2")]
    pub labels: ::std::collections::HashMap<std::string::String, std::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Service {
    #[prost(int32, tag="1")]
    pub internal_port: i32,
    #[prost(int32, tag="2")]
    pub external_port: i32,
    #[prost(string, tag="3")]
    pub ip: std::string::String,
    #[prost(string, tag="4")]
    pub r#type: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Spout {
    #[prost(bool, tag="1")]
    pub overwrite: bool,
    #[prost(message, optional, tag="2")]
    pub service: ::std::option::Option<Service>,
    #[prost(string, tag="3")]
    pub marker: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PfsInput {
    #[prost(string, tag="1")]
    pub name: std::string::String,
    #[prost(string, tag="2")]
    pub repo: std::string::String,
    #[prost(string, tag="3")]
    pub branch: std::string::String,
    #[prost(string, tag="4")]
    pub commit: std::string::String,
    #[prost(string, tag="5")]
    pub glob: std::string::String,
    #[prost(string, tag="8")]
    pub join_on: std::string::String,
    #[prost(bool, tag="6")]
    pub lazy: bool,
    /// EmptyFiles, if true, will cause files from this PFS input to be
    /// presented as empty files. This is useful in shuffle pipelines where you
    /// want to read the names of files and reorganize them using symlinks.
    #[prost(bool, tag="7")]
    pub empty_files: bool,
    /// S3, if true, will cause the worker to NOT download or link files from this
    /// input into the /pfs directory. Instead, an instance of our S3 gateway
    /// service will run on each of the sidecars, and data can be retrieved from
    /// this input by querying
    /// http://<pipeline>-s3.<namespace>/<job id>.<input>/my/file
    #[prost(bool, tag="9")]
    pub s3: bool,
}
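// Illustrative sketch (not generated): a PfsInput that globs a repo into one datum
// per top-level path. The repo and branch names are hypothetical.
pub fn example_pfs_input() -> PfsInput {
    PfsInput {
        repo: "images".to_string(),
        branch: "master".to_string(),
        // "/*" produces one datum per top-level path in the repo.
        glob: "/*".to_string(),
        // lazy defers downloading file contents until the user code opens them.
        lazy: true,
        ..Default::default()
    }
}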
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CronInput {
    #[prost(string, tag="1")]
    pub name: std::string::String,
    #[prost(string, tag="2")]
    pub repo: std::string::String,
    #[prost(string, tag="3")]
    pub commit: std::string::String,
    #[prost(string, tag="4")]
    pub spec: std::string::String,
    /// Overwrite, if true, will expose a single datum that gets overwritten each
    /// tick. If false, it will create a new datum for each tick.
    #[prost(bool, tag="6")]
    pub overwrite: bool,
    #[prost(message, optional, tag="5")]
    pub start: ::std::option::Option<::prost_types::Timestamp>,
}
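// Illustrative sketch (not generated): a CronInput that ticks hourly and overwrites
// the previous tick's datum instead of accumulating one datum per tick. The name is
// hypothetical; the spec uses standard cron syntax.
pub fn example_cron_input() -> CronInput {
    CronInput {
        name: "tick".to_string(),
        spec: "@hourly".to_string(),
        overwrite: true,
        ..Default::default()
    }
}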
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GitInput {
    #[prost(string, tag="1")]
    pub name: std::string::String,
    #[prost(string, tag="2")]
    pub url: std::string::String,
    #[prost(string, tag="3")]
    pub branch: std::string::String,
    #[prost(string, tag="4")]
    pub commit: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Input {
    #[prost(message, optional, tag="6")]
    pub pfs: ::std::option::Option<PfsInput>,
    #[prost(message, repeated, tag="7")]
    pub join: ::std::vec::Vec<Input>,
    #[prost(message, repeated, tag="2")]
    pub cross: ::std::vec::Vec<Input>,
    #[prost(message, repeated, tag="3")]
    pub union: ::std::vec::Vec<Input>,
    #[prost(message, optional, tag="4")]
    pub cron: ::std::option::Option<CronInput>,
    #[prost(message, optional, tag="5")]
    pub git: ::std::option::Option<GitInput>,
}
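// Illustrative sketch (not generated): inputs compose recursively, so a cross of two
// PFS inputs pairs every datum of one with every datum of the other. The repo names
// are hypothetical.
pub fn example_cross_input() -> Input {
    let pfs = |repo: &str, glob: &str| Input {
        pfs: Some(PfsInput {
            repo: repo.to_string(),
            glob: glob.to_string(),
            ..Default::default()
        }),
        ..Default::default()
    };
    Input {
        cross: vec![pfs("images", "/*"), pfs("models", "/")],
        ..Default::default()
    }
}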
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobInput {
    #[prost(string, tag="4")]
    pub name: std::string::String,
    #[prost(message, optional, tag="1")]
    pub commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(string, tag="2")]
    pub glob: std::string::String,
    #[prost(bool, tag="3")]
    pub lazy: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ParallelismSpec {
    /// Starts the pipeline/job with 'constant' workers, unless 'constant' is
    /// zero. If 'constant' is zero (which is the zero value of ParallelismSpec),
    /// then Pachyderm chooses the number of workers to start (currently it
    /// chooses the number of workers in the cluster).
    #[prost(uint64, tag="2")]
    pub constant: u64,
    /// Starts the pipeline/job with number of workers equal to 'coefficient' * N,
    /// where N is the number of nodes in the kubernetes cluster.
    ///
    /// For example, if each Kubernetes node has four CPUs, you might set
    /// 'coefficient' to four, so that there are four Pachyderm workers per
    /// Kubernetes node, and each Pachyderm worker gets one CPU. If you want to
    /// reserve half the nodes in your cluster for other tasks, you might set
    /// 'coefficient' to 0.5.
    #[prost(double, tag="3")]
    pub coefficient: f64,
}
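// Illustrative sketch (not generated): the two mutually exclusive ways to size a
// pipeline's worker pool, per the field comments above.
pub fn example_parallelism_specs() -> (ParallelismSpec, ParallelismSpec) {
    // Exactly four workers, regardless of cluster size.
    let fixed = ParallelismSpec { constant: 4, coefficient: 0.0 };
    // One worker for every two nodes in the Kubernetes cluster.
    let scaled = ParallelismSpec { constant: 0, coefficient: 0.5 };
    (fixed, scaled)
}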
/// HashTreeSpec sets the number of shards into which pps splits a pipeline's
/// output commits (sharded commits are implemented in Pachyderm 1.8+ only)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HashtreeSpec {
    #[prost(uint64, tag="1")]
    pub constant: u64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InputFile {
    /// This file's absolute path within its pfs repo.
    #[prost(string, tag="4")]
    pub path: std::string::String,
    /// This file's hash
    #[prost(bytes, tag="5")]
    pub hash: std::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Datum {
    /// ID is the hash computed from all the files
    #[prost(string, tag="1")]
    pub id: std::string::String,
    #[prost(message, optional, tag="2")]
    pub job: ::std::option::Option<Job>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DatumInfo {
    #[prost(message, optional, tag="1")]
    pub datum: ::std::option::Option<Datum>,
    #[prost(enumeration="DatumState", tag="2")]
    pub state: i32,
    #[prost(message, optional, tag="3")]
    pub stats: ::std::option::Option<ProcessStats>,
    #[prost(message, optional, tag="4")]
    pub pfs_state: ::std::option::Option<super::pfs::File>,
    #[prost(message, repeated, tag="5")]
    pub data: ::std::vec::Vec<super::pfs::FileInfo>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Aggregate {
    #[prost(int64, tag="1")]
    pub count: i64,
    #[prost(double, tag="2")]
    pub mean: f64,
    #[prost(double, tag="3")]
    pub stddev: f64,
    #[prost(double, tag="4")]
    pub fifth_percentile: f64,
    #[prost(double, tag="5")]
    pub ninety_fifth_percentile: f64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ProcessStats {
    #[prost(message, optional, tag="1")]
    pub download_time: ::std::option::Option<::prost_types::Duration>,
    #[prost(message, optional, tag="2")]
    pub process_time: ::std::option::Option<::prost_types::Duration>,
    #[prost(message, optional, tag="3")]
    pub upload_time: ::std::option::Option<::prost_types::Duration>,
    #[prost(uint64, tag="4")]
    pub download_bytes: u64,
    #[prost(uint64, tag="5")]
    pub upload_bytes: u64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AggregateProcessStats {
    #[prost(message, optional, tag="1")]
    pub download_time: ::std::option::Option<Aggregate>,
    #[prost(message, optional, tag="2")]
    pub process_time: ::std::option::Option<Aggregate>,
    #[prost(message, optional, tag="3")]
    pub upload_time: ::std::option::Option<Aggregate>,
    #[prost(message, optional, tag="4")]
    pub download_bytes: ::std::option::Option<Aggregate>,
    #[prost(message, optional, tag="5")]
    pub upload_bytes: ::std::option::Option<Aggregate>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct WorkerStatus {
    #[prost(string, tag="1")]
    pub worker_id: std::string::String,
    #[prost(string, tag="2")]
    pub job_id: std::string::String,
    #[prost(message, repeated, tag="3")]
    pub data: ::std::vec::Vec<InputFile>,
    /// Started is the time processing on the current datum began.
    #[prost(message, optional, tag="4")]
    pub started: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(message, optional, tag="5")]
    pub stats: ::std::option::Option<ProcessStats>,
    #[prost(int64, tag="6")]
    pub queue_size: i64,
}
/// ResourceSpec describes the amount of resources that pipeline pods should
/// request from kubernetes, for scheduling.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceSpec {
    /// The number of CPUs each worker needs (partial values are allowed, and
    /// encouraged)
    #[prost(float, tag="1")]
    pub cpu: f32,
    /// The amount of memory each worker needs (in bytes, with allowed
    /// SI suffixes: M, K, G, Mi, Ki, Gi, etc.).
    #[prost(string, tag="2")]
    pub memory: std::string::String,
    /// The spec for GPU resources.
    #[prost(message, optional, tag="5")]
    pub gpu: ::std::option::Option<GpuSpec>,
    /// The amount of ephemeral storage each worker needs (in bytes, with allowed
    /// SI suffixes: M, K, G, Mi, Ki, Gi, etc.).
    #[prost(string, tag="4")]
    pub disk: std::string::String,
}
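// Illustrative sketch (not generated): a ResourceSpec requesting half a CPU, 2GiB of
// memory, 10GiB of ephemeral storage, and one GPU per worker. Memory and disk use
// Kubernetes quantity strings, as the field comments above note; the GPU type and
// amounts are hypothetical.
pub fn example_resource_spec() -> ResourceSpec {
    ResourceSpec {
        cpu: 0.5,
        memory: "2Gi".to_string(),
        gpu: Some(GpuSpec {
            r#type: "nvidia.com/gpu".to_string(),
            number: 1,
        }),
        disk: "10Gi".to_string(),
    }
}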
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GpuSpec {
    /// The type of GPU (nvidia.com/gpu or amd.com/gpu for example).
    #[prost(string, tag="1")]
    pub r#type: std::string::String,
    /// The number of GPUs to request.
    #[prost(int64, tag="2")]
    pub number: i64,
}
/// EtcdJobInfo is the portion of the JobInfo that gets stored in etcd during
/// job execution. It contains fields which change over the lifetime of the job
/// but aren't used in the execution of the job.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EtcdJobInfo {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    #[prost(message, optional, tag="2")]
    pub pipeline: ::std::option::Option<Pipeline>,
    #[prost(message, optional, tag="3")]
    pub output_commit: ::std::option::Option<super::pfs::Commit>,
    /// Job restart count (e.g. due to datum failure)
    #[prost(uint64, tag="4")]
    pub restart: u64,
    /// Counts of how many times we processed or skipped a datum
    #[prost(int64, tag="5")]
    pub data_processed: i64,
    #[prost(int64, tag="6")]
    pub data_skipped: i64,
    #[prost(int64, tag="7")]
    pub data_total: i64,
    #[prost(int64, tag="8")]
    pub data_failed: i64,
    #[prost(int64, tag="15")]
    pub data_recovered: i64,
    /// Download/process/upload time and download/upload bytes
    #[prost(message, optional, tag="9")]
    pub stats: ::std::option::Option<ProcessStats>,
    #[prost(message, optional, tag="10")]
    pub stats_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(enumeration="JobState", tag="11")]
    pub state: i32,
    #[prost(string, tag="12")]
    pub reason: std::string::String,
    #[prost(message, optional, tag="13")]
    pub started: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(message, optional, tag="14")]
    pub finished: ::std::option::Option<::prost_types::Timestamp>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobInfo {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="2")]
    pub transform: ::std::option::Option<Transform>,
    #[prost(message, optional, tag="3")]
    pub pipeline: ::std::option::Option<Pipeline>,
    /// requires ListJobRequest.Full
    #[prost(uint64, tag="13")]
    pub pipeline_version: u64,
    #[prost(message, optional, tag="47")]
    pub spec_commit: ::std::option::Option<super::pfs::Commit>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="12")]
    pub parallelism_spec: ::std::option::Option<ParallelismSpec>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="15")]
    pub egress: ::std::option::Option<Egress>,
    #[prost(message, optional, tag="6")]
    pub parent_job: ::std::option::Option<Job>,
    #[prost(message, optional, tag="7")]
    pub started: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(message, optional, tag="8")]
    pub finished: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(message, optional, tag="9")]
    pub output_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(enumeration="JobState", tag="10")]
    pub state: i32,
    /// reason explains why the job is in the current state
    #[prost(string, tag="35")]
    pub reason: std::string::String,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="14")]
    pub service: ::std::option::Option<Service>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="45")]
    pub spout: ::std::option::Option<Spout>,
    #[prost(message, optional, tag="18")]
    pub output_repo: ::std::option::Option<super::pfs::Repo>,
    /// requires ListJobRequest.Full
    #[prost(string, tag="17")]
    pub output_branch: std::string::String,
    #[prost(uint64, tag="20")]
    pub restart: u64,
    #[prost(int64, tag="22")]
    pub data_processed: i64,
    #[prost(int64, tag="30")]
    pub data_skipped: i64,
    #[prost(int64, tag="40")]
    pub data_failed: i64,
    #[prost(int64, tag="46")]
    pub data_recovered: i64,
    #[prost(int64, tag="23")]
    pub data_total: i64,
    #[prost(message, optional, tag="31")]
    pub stats: ::std::option::Option<ProcessStats>,
    #[prost(message, repeated, tag="24")]
    pub worker_status: ::std::vec::Vec<WorkerStatus>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="25")]
    pub resource_requests: ::std::option::Option<ResourceSpec>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="36")]
    pub resource_limits: ::std::option::Option<ResourceSpec>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="48")]
    pub sidecar_resource_limits: ::std::option::Option<ResourceSpec>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="26")]
    pub input: ::std::option::Option<Input>,
    #[prost(message, optional, tag="27")]
    pub new_branch: ::std::option::Option<super::pfs::BranchInfo>,
    #[prost(message, optional, tag="29")]
    pub stats_commit: ::std::option::Option<super::pfs::Commit>,
    /// requires ListJobRequest.Full
    #[prost(bool, tag="32")]
    pub enable_stats: bool,
    /// requires ListJobRequest.Full
    #[prost(string, tag="33")]
    pub salt: std::string::String,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="37")]
    pub chunk_spec: ::std::option::Option<ChunkSpec>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="38")]
    pub datum_timeout: ::std::option::Option<::prost_types::Duration>,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="39")]
    pub job_timeout: ::std::option::Option<::prost_types::Duration>,
    /// requires ListJobRequest.Full
    #[prost(int64, tag="41")]
    pub datum_tries: i64,
    /// requires ListJobRequest.Full
    #[prost(message, optional, tag="42")]
    pub scheduling_spec: ::std::option::Option<SchedulingSpec>,
    /// requires ListJobRequest.Full
    #[prost(string, tag="43")]
    pub pod_spec: std::string::String,
    /// requires ListJobRequest.Full
    #[prost(string, tag="44")]
    pub pod_patch: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Worker {
    #[prost(string, tag="1")]
    pub name: std::string::String,
    #[prost(enumeration="WorkerState", tag="2")]
    pub state: i32,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct JobInfos {
    #[prost(message, repeated, tag="1")]
    pub job_info: ::std::vec::Vec<JobInfo>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Pipeline {
    #[prost(string, tag="1")]
    pub name: std::string::String,
}
/// EtcdPipelineInfo is the proto that Pachd stores in etcd for each pipeline. It
/// tracks the state of the pipeline, and points to its metadata in PFS (and,
/// by pointing to a PFS commit, de facto tracks the pipeline's version).
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EtcdPipelineInfo {
    #[prost(enumeration="PipelineState", tag="1")]
    pub state: i32,
    #[prost(string, tag="4")]
    pub reason: std::string::String,
    #[prost(message, optional, tag="2")]
    pub spec_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(map="int32, int32", tag="3")]
    pub job_counts: ::std::collections::HashMap<i32, i32>,
    #[prost(string, tag="5")]
    pub auth_token: std::string::String,
    #[prost(enumeration="JobState", tag="6")]
    pub last_job_state: i32,
    /// parallelism tracks the literal number of workers that this pipeline should
    /// run. Unlike PipelineInfo.ParallelismSpec, this accounts for the number of
    /// nodes in the k8s cluster if Coefficient parallelism is used (i.e. if
    /// Coefficient is 2 and the cluster has 5 nodes, this will be set to 10 by
    /// pachd). This allows the worker master to shard work correctly without
    /// k8s privileges and without knowing the number of cluster nodes in the
    /// Coefficient case.
    #[prost(uint64, tag="7")]
    pub parallelism: u64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PipelineInfo {
    #[prost(string, tag="17")]
    pub id: std::string::String,
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    #[prost(uint64, tag="11")]
    pub version: u64,
    #[prost(message, optional, tag="2")]
    pub transform: ::std::option::Option<Transform>,
    /// tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    /// when running in a kubernetes cluster on which kubeflow has been installed.
    /// Exactly one of 'tf_job' and 'transform' should be set
    #[prost(message, optional, tag="46")]
    pub tf_job: ::std::option::Option<TfJob>,
    #[prost(message, optional, tag="10")]
    pub parallelism_spec: ::std::option::Option<ParallelismSpec>,
    #[prost(message, optional, tag="42")]
    pub hashtree_spec: ::std::option::Option<HashtreeSpec>,
    #[prost(message, optional, tag="15")]
    pub egress: ::std::option::Option<Egress>,
    #[prost(message, optional, tag="6")]
    pub created_at: ::std::option::Option<::prost_types::Timestamp>,
    /// state indicates the current state of the pipeline. This is not stored in
    /// PFS along with the rest of this data structure--PPS.InspectPipeline fills
    /// it in
    #[prost(enumeration="PipelineState", tag="7")]
    pub state: i32,
    /// same for stopped field
    #[prost(bool, tag="38")]
    pub stopped: bool,
    #[prost(string, tag="8")]
    pub recent_error: std::string::String,
    #[prost(int64, tag="49")]
    pub workers_requested: i64,
    #[prost(int64, tag="50")]
    pub workers_available: i64,
    /// job_counts and last_job_state indicate the number of jobs within this
    /// pipeline in a given state and the state of the most recently created job,
    /// respectively. This is not stored in PFS along with the rest of this data
    /// structure--PPS.InspectPipeline fills it in from the EtcdPipelineInfo.
    #[prost(map="int32, int32", tag="9")]
    pub job_counts: ::std::collections::HashMap<i32, i32>,
    #[prost(enumeration="JobState", tag="43")]
    pub last_job_state: i32,
    #[prost(string, tag="16")]
    pub output_branch: std::string::String,
    #[prost(message, optional, tag="19")]
    pub resource_requests: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="31")]
    pub resource_limits: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="51")]
    pub sidecar_resource_limits: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="20")]
    pub input: ::std::option::Option<Input>,
    #[prost(string, tag="21")]
    pub description: std::string::String,
    #[prost(string, tag="23")]
    pub cache_size: std::string::String,
    #[prost(bool, tag="24")]
    pub enable_stats: bool,
    #[prost(string, tag="25")]
    pub salt: std::string::String,
    /// reason includes any error messages associated with a failed pipeline
    #[prost(string, tag="28")]
    pub reason: std::string::String,
    #[prost(int64, tag="29")]
    pub max_queue_size: i64,
    #[prost(message, optional, tag="30")]
    pub service: ::std::option::Option<Service>,
    #[prost(message, optional, tag="45")]
    pub spout: ::std::option::Option<Spout>,
    #[prost(message, optional, tag="32")]
    pub chunk_spec: ::std::option::Option<ChunkSpec>,
    #[prost(message, optional, tag="33")]
    pub datum_timeout: ::std::option::Option<::prost_types::Duration>,
    #[prost(message, optional, tag="34")]
    pub job_timeout: ::std::option::Option<::prost_types::Duration>,
    #[prost(string, tag="35")]
    pub githook_url: std::string::String,
    #[prost(message, optional, tag="36")]
    pub spec_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(bool, tag="37")]
    pub standby: bool,
    #[prost(int64, tag="39")]
    pub datum_tries: i64,
    #[prost(message, optional, tag="40")]
    pub scheduling_spec: ::std::option::Option<SchedulingSpec>,
    #[prost(string, tag="41")]
    pub pod_spec: std::string::String,
    #[prost(string, tag="44")]
    pub pod_patch: std::string::String,
    #[prost(bool, tag="47")]
    pub s3_out: bool,
    #[prost(message, optional, tag="48")]
    pub metadata: ::std::option::Option<Metadata>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PipelineInfos {
    #[prost(message, repeated, tag="1")]
    pub pipeline_info: ::std::vec::Vec<PipelineInfo>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateJobRequest {
    #[prost(message, optional, tag="2")]
    pub pipeline: ::std::option::Option<Pipeline>,
    #[prost(message, optional, tag="25")]
    pub output_commit: ::std::option::Option<super::pfs::Commit>,
    /// Fields below should only be set when restoring an extracted job.
    #[prost(uint64, tag="26")]
    pub restart: u64,
    /// Counts of how many times we processed or skipped a datum
    #[prost(int64, tag="27")]
    pub data_processed: i64,
    #[prost(int64, tag="28")]
    pub data_skipped: i64,
    #[prost(int64, tag="29")]
    pub data_total: i64,
    #[prost(int64, tag="30")]
    pub data_failed: i64,
    #[prost(int64, tag="31")]
    pub data_recovered: i64,
    /// Download/process/upload time and download/upload bytes
    #[prost(message, optional, tag="32")]
    pub stats: ::std::option::Option<ProcessStats>,
    #[prost(message, optional, tag="33")]
    pub stats_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(enumeration="JobState", tag="34")]
    pub state: i32,
    #[prost(string, tag="35")]
    pub reason: std::string::String,
    #[prost(message, optional, tag="36")]
    pub started: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(message, optional, tag="37")]
    pub finished: ::std::option::Option<::prost_types::Timestamp>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InspectJobRequest {
    /// Callers should set either Job or OutputCommit, not both.
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    #[prost(message, optional, tag="3")]
    pub output_commit: ::std::option::Option<super::pfs::Commit>,
    /// block until state is either JOB_STATE_FAILURE or JOB_STATE_SUCCESS
    #[prost(bool, tag="2")]
    pub block_state: bool,
    #[prost(bool, tag="4")]
    pub full: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListJobRequest {
    /// nil means all pipelines
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    /// nil means all inputs
    #[prost(message, repeated, tag="2")]
    pub input_commit: ::std::vec::Vec<super::pfs::Commit>,
    /// nil means all outputs
    #[prost(message, optional, tag="3")]
    pub output_commit: ::std::option::Option<super::pfs::Commit>,
    /// History indicates whether to return jobs from historical versions of
    /// pipelines. Its semantics are:
    /// 0: Return jobs from the current version of the pipeline or pipelines.
    /// 1: Return the above and jobs from the next most recent version
    /// 2: etc.
    /// -1: Return jobs from all historical versions.
    #[prost(int64, tag="4")]
    pub history: i64,
    /// Full indicates whether the result should include all pipeline details in
    /// each JobInfo, or limited information including name and status, but
    /// excluding information in the pipeline spec. Leaving this "false" can make
    /// the call significantly faster in clusters with a large number of pipelines
    /// and jobs.
    /// Note that if 'input_commit' is set, this field is coerced to "true"
    #[prost(bool, tag="5")]
    pub full: bool,
}
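// Illustrative sketch (not generated): a ListJobRequest asking for every job run by
// one pipeline, across all historical pipeline versions, with full JobInfos. The
// pipeline name is hypothetical.
pub fn example_list_job_request() -> ListJobRequest {
    ListJobRequest {
        pipeline: Some(Pipeline { name: "edges".to_string() }),
        // -1 means "all historical versions", per the history comment above.
        history: -1,
        full: true,
        ..Default::default()
    }
}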
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FlushJobRequest {
    #[prost(message, repeated, tag="1")]
    pub commits: ::std::vec::Vec<super::pfs::Commit>,
    #[prost(message, repeated, tag="2")]
    pub to_pipelines: ::std::vec::Vec<Pipeline>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteJobRequest {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StopJobRequest {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct UpdateJobStateRequest {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    #[prost(enumeration="JobState", tag="2")]
    pub state: i32,
    #[prost(string, tag="3")]
    pub reason: std::string::String,
    #[prost(uint64, tag="4")]
    pub restart: u64,
    #[prost(int64, tag="5")]
    pub data_processed: i64,
    #[prost(int64, tag="6")]
    pub data_skipped: i64,
    #[prost(int64, tag="7")]
    pub data_failed: i64,
    #[prost(int64, tag="8")]
    pub data_recovered: i64,
    #[prost(int64, tag="9")]
    pub data_total: i64,
    #[prost(message, optional, tag="10")]
    pub stats: ::std::option::Option<ProcessStats>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GetLogsRequest {
    /// The pipeline from which we want to get logs (required if the job in 'job'
    /// was created as part of a pipeline. To get logs from a non-orphan job
    /// without the pipeline that created it, you need to use ElasticSearch).
    #[prost(message, optional, tag="2")]
    pub pipeline: ::std::option::Option<Pipeline>,
    /// The job from which we want to get logs.
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    /// Names of input files from which we want processing logs. This may contain
    /// multiple files, to query pipelines that contain multiple inputs. Each
    /// filter may be an absolute path of a file within a pps repo, or it may be
    /// a hash for that file (to search for files at specific versions)
    #[prost(string, repeated, tag="3")]
    pub data_filters: ::std::vec::Vec<std::string::String>,
    #[prost(message, optional, tag="6")]
    pub datum: ::std::option::Option<Datum>,
    /// If true, get logs from the master process.
    #[prost(bool, tag="5")]
    pub master: bool,
    /// Continue to follow new logs as they become available.
    #[prost(bool, tag="7")]
    pub follow: bool,
    /// If nonzero, the number of lines from the end of the logs to return.  Note:
    /// tail applies per container, so you will get tail * <number of pods> total
    /// lines back.
    #[prost(int64, tag="8")]
    pub tail: i64,
    /// UseLokiBackend causes the logs request to go through the loki backend
    /// rather than through kubernetes. This behavior can also be achieved by
    /// setting the LOKI_LOGGING feature flag.
    #[prost(bool, tag="9")]
    pub use_loki_backend: bool,
}
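// Illustrative sketch (not generated): a GetLogsRequest that follows a pipeline's
// logs and returns only the last 100 lines per container. The pipeline name is
// hypothetical.
pub fn example_get_logs_request() -> GetLogsRequest {
    GetLogsRequest {
        pipeline: Some(Pipeline { name: "edges".to_string() }),
        follow: true,
        tail: 100,
        ..Default::default()
    }
}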
/// LogMessage is a log line from a PPS worker, annotated with metadata
/// indicating when and why the line was logged.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LogMessage {
    /// The job and pipeline for which a PFS file is being processed (if the job
    /// is an orphan job, pipeline name and ID will be unset)
    #[prost(string, tag="1")]
    pub pipeline_name: std::string::String,
    #[prost(string, tag="3")]
    pub job_id: std::string::String,
    #[prost(string, tag="7")]
    pub worker_id: std::string::String,
    #[prost(string, tag="9")]
    pub datum_id: std::string::String,
    #[prost(bool, tag="10")]
    pub master: bool,
    /// The PFS files being processed (one per pipeline/job input)
    #[prost(message, repeated, tag="4")]
    pub data: ::std::vec::Vec<InputFile>,
    /// User is true if the log message comes from the user's code.
    #[prost(bool, tag="8")]
    pub user: bool,
    /// The message logged, and the time at which it was logged
    #[prost(message, optional, tag="5")]
    pub ts: ::std::option::Option<::prost_types::Timestamp>,
    #[prost(string, tag="6")]
    pub message: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RestartDatumRequest {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    #[prost(string, repeated, tag="2")]
    pub data_filters: ::std::vec::Vec<std::string::String>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InspectDatumRequest {
    #[prost(message, optional, tag="1")]
    pub datum: ::std::option::Option<Datum>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListDatumRequest {
    #[prost(message, optional, tag="1")]
    pub job: ::std::option::Option<Job>,
    #[prost(int64, tag="2")]
    pub page_size: i64,
    #[prost(int64, tag="3")]
    pub page: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListDatumResponse {
    #[prost(message, repeated, tag="1")]
    pub datum_infos: ::std::vec::Vec<DatumInfo>,
    #[prost(int64, tag="2")]
    pub total_pages: i64,
    #[prost(int64, tag="3")]
    pub page: i64,
}
/// ListDatumStreamResponse is identical to ListDatumResponse, except that only
/// one DatumInfo is present (as these responses are streamed)
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListDatumStreamResponse {
    #[prost(message, optional, tag="1")]
    pub datum_info: ::std::option::Option<DatumInfo>,
    /// total_pages is only set in the first response (and set to 0 in all other
    /// responses)
    #[prost(int64, tag="2")]
    pub total_pages: i64,
    /// page is only set in the first response (and set to 0 in all other
    /// responses)
    #[prost(int64, tag="3")]
    pub page: i64,
}
/// ChunkSpec specifies how a pipeline should chunk its datums.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ChunkSpec {
    /// number, if nonzero, specifies that each chunk should contain `number`
    /// datums. Chunks may contain fewer if the total number of datums doesn't
    /// divide evenly.
    #[prost(int64, tag="1")]
    pub number: i64,
    /// size_bytes, if nonzero, specifies a target size for each chunk of datums.
    /// Chunks may be larger or smaller than size_bytes, but will usually be
    /// pretty close to size_bytes in size.
    #[prost(int64, tag="2")]
    pub size_bytes: i64,
}
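// Illustrative sketch (not generated): the two ways to bound a chunk of datums, per
// the field comments above. Only one of the two is normally set.
pub fn example_chunk_specs() -> (ChunkSpec, ChunkSpec) {
    // Roughly ten datums per chunk.
    let by_count = ChunkSpec { number: 10, size_bytes: 0 };
    // Chunks of roughly 100MB of input data each.
    let by_size = ChunkSpec { number: 0, size_bytes: 100 * 1024 * 1024 };
    (by_count, by_size)
}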
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SchedulingSpec {
    #[prost(map="string, string", tag="1")]
    pub node_selector: ::std::collections::HashMap<std::string::String, std::string::String>,
    #[prost(string, tag="2")]
    pub priority_class_name: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreatePipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    /// tf_job encodes a Kubeflow TFJob spec. Pachyderm uses this to create TFJobs
    /// when running in a kubernetes cluster on which kubeflow has been installed.
    /// Exactly one of 'tf_job' and 'transform' should be set
    #[prost(message, optional, tag="35")]
    pub tf_job: ::std::option::Option<TfJob>,
    #[prost(message, optional, tag="2")]
    pub transform: ::std::option::Option<Transform>,
    #[prost(message, optional, tag="7")]
    pub parallelism_spec: ::std::option::Option<ParallelismSpec>,
    #[prost(message, optional, tag="31")]
    pub hashtree_spec: ::std::option::Option<HashtreeSpec>,
    #[prost(message, optional, tag="9")]
    pub egress: ::std::option::Option<Egress>,
    #[prost(bool, tag="5")]
    pub update: bool,
    #[prost(string, tag="10")]
    pub output_branch: std::string::String,
    /// s3_out, if set, requires a pipeline's user to write to its output repo
    /// via Pachyderm's s3 gateway (if set, workers will serve Pachyderm's s3
    /// gateway API at http://<pipeline>-s3.<namespace>/<job id>.out/my/file).
    /// In this mode /pfs/out won't be walked or uploaded, and the s3 gateway
    /// service in the workers will allow writes to the job's output commit
    #[prost(bool, tag="36")]
    pub s3_out: bool,
    #[prost(message, optional, tag="12")]
    pub resource_requests: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="22")]
    pub resource_limits: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="47")]
    pub sidecar_resource_limits: ::std::option::Option<ResourceSpec>,
    #[prost(message, optional, tag="13")]
    pub input: ::std::option::Option<Input>,
    #[prost(string, tag="14")]
    pub description: std::string::String,
    #[prost(string, tag="16")]
    pub cache_size: std::string::String,
    #[prost(bool, tag="17")]
    pub enable_stats: bool,
    /// Reprocess forces the pipeline to reprocess all datums.
    /// It only has meaning if Update is true
    #[prost(bool, tag="18")]
    pub reprocess: bool,
    #[prost(int64, tag="20")]
    pub max_queue_size: i64,
    #[prost(message, optional, tag="21")]
    pub service: ::std::option::Option<Service>,
    #[prost(message, optional, tag="33")]
    pub spout: ::std::option::Option<Spout>,
    #[prost(message, optional, tag="23")]
    pub chunk_spec: ::std::option::Option<ChunkSpec>,
    #[prost(message, optional, tag="24")]
    pub datum_timeout: ::std::option::Option<::prost_types::Duration>,
    #[prost(message, optional, tag="25")]
    pub job_timeout: ::std::option::Option<::prost_types::Duration>,
    #[prost(string, tag="26")]
    pub salt: std::string::String,
    #[prost(bool, tag="27")]
    pub standby: bool,
    #[prost(int64, tag="28")]
    pub datum_tries: i64,
    #[prost(message, optional, tag="29")]
    pub scheduling_spec: ::std::option::Option<SchedulingSpec>,
    /// deprecated, use pod_patch below
    #[prost(string, tag="30")]
    pub pod_spec: std::string::String,
    /// a JSON patch will be applied to the pipeline's pod_spec before it's created.
    #[prost(string, tag="32")]
    pub pod_patch: std::string::String,
    #[prost(message, optional, tag="34")]
    pub spec_commit: ::std::option::Option<super::pfs::Commit>,
    #[prost(message, optional, tag="46")]
    pub metadata: ::std::option::Option<Metadata>,
}
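// Illustrative sketch (not generated): a CreatePipelineRequest tying the pieces above
// together -- a named pipeline, a transform, and a single PFS input. All names and
// values are hypothetical.
pub fn example_create_pipeline_request() -> CreatePipelineRequest {
    CreatePipelineRequest {
        pipeline: Some(Pipeline { name: "edges".to_string() }),
        transform: Some(Transform {
            image: "python:3.8".to_string(),
            cmd: vec!["python3".to_string(), "/app/edges.py".to_string()],
            ..Default::default()
        }),
        input: Some(Input {
            pfs: Some(PfsInput {
                repo: "images".to_string(),
                glob: "/*".to_string(),
                ..Default::default()
            }),
            ..Default::default()
        }),
        parallelism_spec: Some(ParallelismSpec { constant: 1, coefficient: 0.0 }),
        ..Default::default()
    }
}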
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InspectPipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ListPipelineRequest {
    /// If non-nil, only return info about a single pipeline; this is redundant
    /// with InspectPipeline unless history is non-zero.
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    /// History indicates how many historical versions you want returned. Its
    /// semantics are:
    /// 0: Return the current version of the pipeline or pipelines.
    /// 1: Return the above and the next most recent version
    /// 2: etc.
    /// -1: Return all historical versions.
    #[prost(int64, tag="2")]
    pub history: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeletePipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    #[prost(bool, tag="4")]
    pub all: bool,
    #[prost(bool, tag="5")]
    pub force: bool,
    #[prost(bool, tag="6")]
    pub keep_repo: bool,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StartPipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StopPipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RunPipelineRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
    #[prost(message, repeated, tag="2")]
    pub provenance: ::std::vec::Vec<super::pfs::CommitProvenance>,
    #[prost(string, tag="4")]
    pub job_id: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct RunCronRequest {
    #[prost(message, optional, tag="1")]
    pub pipeline: ::std::option::Option<Pipeline>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CreateSecretRequest {
    #[prost(bytes, tag="1")]
    pub file: std::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DeleteSecretRequest {
    #[prost(message, optional, tag="1")]
    pub secret: ::std::option::Option<Secret>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct InspectSecretRequest {
    #[prost(message, optional, tag="1")]
    pub secret: ::std::option::Option<Secret>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Secret {
    #[prost(string, tag="1")]
    pub name: std::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SecretInfo {
    #[prost(message, optional, tag="1")]
    pub secret: ::std::option::Option<Secret>,
    #[prost(string, tag="2")]
    pub r#type: std::string::String,
    #[prost(message, optional, tag="3")]
    pub creation_timestamp: ::std::option::Option<::prost_types::Timestamp>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SecretInfos {
    #[prost(message, repeated, tag="1")]
    pub secret_info: ::std::vec::Vec<SecretInfo>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GarbageCollectRequest {
    /// Memory is how much memory to use in computing which objects are alive. A
    /// larger number will result in more precise garbage collection (at the
    /// cost of more memory usage).
    #[prost(int64, tag="1")]
    pub memory_bytes: i64,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GarbageCollectResponse {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ActivateAuthRequest {
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ActivateAuthResponse {
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum JobState {
    JobStarting = 0,
    JobRunning = 1,
    JobFailure = 2,
    JobSuccess = 3,
    JobKilled = 4,
    JobMerging = 5,
    JobEgressing = 6,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DatumState {
    Failed = 0,
    Success = 1,
    Skipped = 2,
    Starting = 3,
    Recovered = 4,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum WorkerState {
    PodRunning = 0,
    PodSuccess = 1,
    PodFailed = 2,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum PipelineState {
    /// There is an EtcdPipelineInfo + spec commit, but no RC
    /// This happens when a pipeline has been created but not yet picked up by a
    /// PPS server.
    PipelineStarting = 0,
    /// A pipeline has a spec commit and a service + RC
    /// This is the normal state of a pipeline.
    PipelineRunning = 1,
    /// Equivalent to STARTING (there is an EtcdPipelineInfo + commit, but no RC)
    /// After some error caused runPipeline to exit, but before the pipeline is
    /// re-run. This is when the exponential backoff is in effect.
    PipelineRestarting = 2,
    /// The pipeline has encountered unrecoverable errors and is no longer being
    /// retried. It won't leave this state until the pipeline is updated.
    PipelineFailure = 3,
    /// The pipeline has been explicitly paused by the user (the pipeline spec's
    /// Stopped field should be true if the pipeline is in this state)
    PipelinePaused = 4,
    /// The pipeline is fully functional, but there are no commits to process.
    PipelineStandby = 5,
    /// The pipeline's workers are crashing or failing to come up. This may
    /// resolve itself; the pipeline may make progress while in this state if the
    /// problem affects only some workers.
    PipelineCrashing = 6,
}
# [ doc = r" Generated client implementations." ] pub mod api_client { # ! [ allow ( unused_variables , dead_code , missing_docs ) ] use tonic :: codegen :: * ; pub struct ApiClient < T > { inner : tonic :: client :: Grpc < T > , } impl ApiClient < tonic :: transport :: Channel > { # [ doc = r" Attempt to create a new client by connecting to a given endpoint." ] pub async fn connect < D > ( dst : D ) -> Result < Self , tonic :: transport :: Error > where D : std :: convert :: TryInto < tonic :: transport :: Endpoint > , D :: Error : Into < StdError > , { let conn = tonic :: transport :: Endpoint :: new ( dst ) ? . connect ( ) . await ? ; Ok ( Self :: new ( conn ) ) } } impl < T > ApiClient < T > where T : tonic :: client :: GrpcService < tonic :: body :: BoxBody > , T :: ResponseBody : Body + HttpBody + Send + 'static , T :: Error : Into < StdError > , < T :: ResponseBody as HttpBody > :: Error : Into < StdError > + Send , { pub fn new ( inner : T ) -> Self { let inner = tonic :: client :: Grpc :: new ( inner ) ; Self { inner } } pub fn with_interceptor ( inner : T , interceptor : impl Into < tonic :: Interceptor > ) -> Self { let inner = tonic :: client :: Grpc :: with_interceptor ( inner , interceptor ) ; Self { inner } } pub async fn create_job ( & mut self , request : impl tonic :: IntoRequest < super :: CreateJobRequest > , ) -> Result < tonic :: Response < super :: Job > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/pps.API/CreateJob" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } pub async fn inspect_job ( & mut self , request : impl tonic :: IntoRequest < super :: InspectJobRequest > , ) -> Result < tonic :: Response < super :: JobInfo > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/pps.API/InspectJob" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " ListJob returns information about current and past Pachyderm jobs. This is" ] # [ doc = " deprecated in favor of ListJobStream" ] pub async fn list_job ( & mut self , request : impl tonic :: IntoRequest < super :: ListJobRequest > , ) -> Result < tonic :: Response < super :: JobInfos > , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . into ( ) ) ) } ) ? ; let codec = tonic :: codec :: ProstCodec :: default ( ) ; let path = http :: uri :: PathAndQuery :: from_static ( "/pps.API/ListJob" ) ; self . inner . unary ( request . into_request ( ) , path , codec ) . await } # [ doc = " ListJobStream returns information about current and past Pachyderm jobs." ] pub async fn list_job_stream ( & mut self , request : impl tonic :: IntoRequest < super :: ListJobRequest > , ) -> Result < tonic :: Response < tonic :: codec :: Streaming < super :: JobInfo >> , tonic :: Status > { self . inner . ready ( ) . await . map_err ( | e | { tonic :: Status :: new ( tonic :: Code :: Unknown , format ! ( "Service was not ready: {}" , e . 
into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ListJobStream");
            self.inner.server_streaming(request.into_request(), path, codec).await
        }
        pub async fn flush_job(
            &mut self,
            request: impl tonic::IntoRequest<super::FlushJobRequest>,
        ) -> Result<tonic::Response<tonic::codec::Streaming<super::JobInfo>>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/FlushJob");
            self.inner.server_streaming(request.into_request(), path, codec).await
        }
        pub async fn delete_job(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteJobRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/DeleteJob");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn stop_job(
            &mut self,
            request: impl tonic::IntoRequest<super::StopJobRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/StopJob");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn inspect_datum(
            &mut self,
            request: impl tonic::IntoRequest<super::InspectDatumRequest>,
        ) -> Result<tonic::Response<super::DatumInfo>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/InspectDatum");
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// ListDatum returns information about each datum fed to a Pachyderm job. This
        /// is deprecated in favor of ListDatumStream
        pub async fn list_datum(
            &mut self,
            request: impl tonic::IntoRequest<super::ListDatumRequest>,
        ) -> Result<tonic::Response<super::ListDatumResponse>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ListDatum");
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// ListDatumStream returns information about each datum fed to a Pachyderm job
        pub async fn list_datum_stream(
            &mut self,
            request: impl tonic::IntoRequest<super::ListDatumRequest>,
        ) -> Result<tonic::Response<tonic::codec::Streaming<super::ListDatumStreamResponse>>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ListDatumStream");
            self.inner.server_streaming(request.into_request(), path, codec).await
        }
        pub async fn restart_datum(
            &mut self,
            request: impl tonic::IntoRequest<super::RestartDatumRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/RestartDatum");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn create_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::CreatePipelineRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/CreatePipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn inspect_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::InspectPipelineRequest>,
        ) -> Result<tonic::Response<super::PipelineInfo>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/InspectPipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn list_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::ListPipelineRequest>,
        ) -> Result<tonic::Response<super::PipelineInfos>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ListPipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn delete_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::DeletePipelineRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/DeletePipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn start_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::StartPipelineRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/StartPipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn stop_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::StopPipelineRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/StopPipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn run_pipeline(
            &mut self,
            request: impl tonic::IntoRequest<super::RunPipelineRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/RunPipeline");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn run_cron(
            &mut self,
            request: impl tonic::IntoRequest<super::RunCronRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/RunCron");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn create_secret(
            &mut self,
            request: impl tonic::IntoRequest<super::CreateSecretRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/CreateSecret");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn delete_secret(
            &mut self,
            request: impl tonic::IntoRequest<super::DeleteSecretRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/DeleteSecret");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn list_secret(
            &mut self,
            request: impl tonic::IntoRequest<()>,
        ) -> Result<tonic::Response<super::SecretInfos>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ListSecret");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn inspect_secret(
            &mut self,
            request: impl tonic::IntoRequest<super::InspectSecretRequest>,
        ) -> Result<tonic::Response<super::SecretInfo>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/InspectSecret");
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// DeleteAll deletes everything
        pub async fn delete_all(
            &mut self,
            request: impl tonic::IntoRequest<()>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/DeleteAll");
            self.inner.unary(request.into_request(), path, codec).await
        }
        pub async fn get_logs(
            &mut self,
            request: impl tonic::IntoRequest<super::GetLogsRequest>,
        ) -> Result<tonic::Response<tonic::codec::Streaming<super::LogMessage>>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/GetLogs");
            self.inner.server_streaming(request.into_request(), path, codec).await
        }
        /// Garbage collection
        pub async fn garbage_collect(
            &mut self,
            request: impl tonic::IntoRequest<super::GarbageCollectRequest>,
        ) -> Result<tonic::Response<super::GarbageCollectResponse>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/GarbageCollect");
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// An internal call that causes PPS to put itself into an auth-enabled state
        /// (all pipeline have tokens, correct permissions, etcd)
        pub async fn activate_auth(
            &mut self,
            request: impl tonic::IntoRequest<super::ActivateAuthRequest>,
        ) -> Result<tonic::Response<super::ActivateAuthResponse>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/ActivateAuth");
            self.inner.unary(request.into_request(), path, codec).await
        }
        /// An internal call used to move a job from one state to another
        pub async fn update_job_state(
            &mut self,
            request: impl tonic::IntoRequest<super::UpdateJobStateRequest>,
        ) -> Result<tonic::Response<()>, tonic::Status> {
            self.inner.ready().await.map_err(|e| {
                tonic::Status::new(tonic::Code::Unknown, format!("Service was not ready: {}", e.into()))
            })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static("/pps.API/UpdateJobState");
            self.inner.unary(request.into_request(), path, codec).await
        }
    }
    impl<T: Clone> Clone for ApiClient<T> {
        fn clone(&self) -> Self {
            Self {
                inner: self.inner.clone(),
            }
        }
    }
    impl<T> std::fmt::Debug for ApiClient<T> {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "ApiClient {{ ... }}")
        }
    }
}
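// --- Usage sketch (not part of the generated output) -------------------------------
// A minimal illustration of how the ApiClient above might be driven from application
// code: one unary call (ListPipeline) and one server-streaming call (GetLogs). It
// assumes tonic's "transport" feature is enabled, that this module sits alongside the
// prost-generated message types (hence the `super::` paths), and that pachd is
// reachable at the hypothetical address "http://localhost:30650"; adjust the endpoint
// and module layout for your build. The function is never invoked here; it exists
// only as a compile-time example, with request fields left at their prost defaults.
#[allow(dead_code)]
mod usage_sketch {
    use super::api_client::ApiClient;

    async fn list_pipelines_and_tail_logs() -> Result<(), Box<dyn std::error::Error>> {
        // Open a plain HTTP/2 channel and wrap it in the generated client.
        let channel = tonic::transport::Channel::from_static("http://localhost:30650")
            .connect()
            .await?;
        let mut client = ApiClient::new(channel);

        // Unary RPC: into_inner() yields the decoded PipelineInfos message.
        let pipelines = client
            .list_pipeline(super::ListPipelineRequest::default())
            .await?
            .into_inner();
        println!("pipelines: {:?}", pipelines);

        // Server-streaming RPC: into_inner() yields a Streaming<LogMessage> that is
        // drained with message() until the server closes the stream.
        let mut logs = client
            .get_logs(super::GetLogsRequest::default())
            .await?
            .into_inner();
        while let Some(log) = logs.message().await? {
            println!("log: {:?}", log);
        }
        Ok(())
    }
}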