tensorflow-serving-client 2.3.0

A prebuilt TensorFlow Serving client generated from the TensorFlow Serving proto files.
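The generated messages below behave like any other rust-protobuf (2.x) message. A minimal sketch of constructing and round-tripping `GPUOptions` follows; the `use` path for the generated module is an assumption and may differ in the published crate.

use protobuf::Message;
// Assumed re-export path for the generated config module; adjust to the crate's actual layout.
use tensorflow_serving_client::tensorflow::GPUOptions;

fn main() -> protobuf::ProtobufResult<()> {
    let mut gpu = GPUOptions::new();
    gpu.set_per_process_gpu_memory_fraction(0.5);
    gpu.set_allow_growth(true);
    gpu.mut_experimental().set_use_unified_memory(true);

    // Serialize to the proto wire format and parse it back.
    let bytes = gpu.write_to_bytes()?;
    let roundtrip = GPUOptions::parse_from_bytes(&bytes)?;
    assert_eq!(gpu, roundtrip);
    Ok(())
}
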
// This file is generated by rust-protobuf 2.14.0. Do not edit
// @generated

// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]

#![cfg_attr(rustfmt, rustfmt_skip)]

#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unsafe_code)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tensorflow/core/protobuf/config.proto`

use protobuf::Message as Message_imported_for_functions;
use protobuf::ProtobufEnum as ProtobufEnum_imported_for_functions;

/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_14_0;

#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions {
    // message fields
    pub per_process_gpu_memory_fraction: f64,
    pub allow_growth: bool,
    pub allocator_type: ::std::string::String,
    pub deferred_deletion_bytes: i64,
    pub visible_device_list: ::std::string::String,
    pub polling_active_delay_usecs: i32,
    pub polling_inactive_delay_msecs: i32,
    pub force_gpu_compatible: bool,
    pub experimental: ::protobuf::SingularPtrField<GPUOptions_Experimental>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a GPUOptions {
    fn default() -> &'a GPUOptions {
        <GPUOptions as ::protobuf::Message>::default_instance()
    }
}

impl GPUOptions {
    pub fn new() -> GPUOptions {
        ::std::default::Default::default()
    }

    // double per_process_gpu_memory_fraction = 1;


    pub fn get_per_process_gpu_memory_fraction(&self) -> f64 {
        self.per_process_gpu_memory_fraction
    }
    pub fn clear_per_process_gpu_memory_fraction(&mut self) {
        self.per_process_gpu_memory_fraction = 0.;
    }

    // Param is passed by value, moved
    pub fn set_per_process_gpu_memory_fraction(&mut self, v: f64) {
        self.per_process_gpu_memory_fraction = v;
    }

    // bool allow_growth = 4;


    pub fn get_allow_growth(&self) -> bool {
        self.allow_growth
    }
    pub fn clear_allow_growth(&mut self) {
        self.allow_growth = false;
    }

    // Param is passed by value, moved
    pub fn set_allow_growth(&mut self, v: bool) {
        self.allow_growth = v;
    }

    // string allocator_type = 2;


    pub fn get_allocator_type(&self) -> &str {
        &self.allocator_type
    }
    pub fn clear_allocator_type(&mut self) {
        self.allocator_type.clear();
    }

    // Param is passed by value, moved
    pub fn set_allocator_type(&mut self, v: ::std::string::String) {
        self.allocator_type = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_allocator_type(&mut self) -> &mut ::std::string::String {
        &mut self.allocator_type
    }

    // Take field
    pub fn take_allocator_type(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.allocator_type, ::std::string::String::new())
    }

    // int64 deferred_deletion_bytes = 3;


    pub fn get_deferred_deletion_bytes(&self) -> i64 {
        self.deferred_deletion_bytes
    }
    pub fn clear_deferred_deletion_bytes(&mut self) {
        self.deferred_deletion_bytes = 0;
    }

    // Param is passed by value, moved
    pub fn set_deferred_deletion_bytes(&mut self, v: i64) {
        self.deferred_deletion_bytes = v;
    }

    // string visible_device_list = 5;


    pub fn get_visible_device_list(&self) -> &str {
        &self.visible_device_list
    }
    pub fn clear_visible_device_list(&mut self) {
        self.visible_device_list.clear();
    }

    // Param is passed by value, moved
    pub fn set_visible_device_list(&mut self, v: ::std::string::String) {
        self.visible_device_list = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_visible_device_list(&mut self) -> &mut ::std::string::String {
        &mut self.visible_device_list
    }

    // Take field
    pub fn take_visible_device_list(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.visible_device_list, ::std::string::String::new())
    }

    // int32 polling_active_delay_usecs = 6;


    pub fn get_polling_active_delay_usecs(&self) -> i32 {
        self.polling_active_delay_usecs
    }
    pub fn clear_polling_active_delay_usecs(&mut self) {
        self.polling_active_delay_usecs = 0;
    }

    // Param is passed by value, moved
    pub fn set_polling_active_delay_usecs(&mut self, v: i32) {
        self.polling_active_delay_usecs = v;
    }

    // int32 polling_inactive_delay_msecs = 7;


    pub fn get_polling_inactive_delay_msecs(&self) -> i32 {
        self.polling_inactive_delay_msecs
    }
    pub fn clear_polling_inactive_delay_msecs(&mut self) {
        self.polling_inactive_delay_msecs = 0;
    }

    // Param is passed by value, moved
    pub fn set_polling_inactive_delay_msecs(&mut self, v: i32) {
        self.polling_inactive_delay_msecs = v;
    }

    // bool force_gpu_compatible = 8;


    pub fn get_force_gpu_compatible(&self) -> bool {
        self.force_gpu_compatible
    }
    pub fn clear_force_gpu_compatible(&mut self) {
        self.force_gpu_compatible = false;
    }

    // Param is passed by value, moved
    pub fn set_force_gpu_compatible(&mut self, v: bool) {
        self.force_gpu_compatible = v;
    }

    // .tensorflow.GPUOptions.Experimental experimental = 9;


    pub fn get_experimental(&self) -> &GPUOptions_Experimental {
        self.experimental.as_ref().unwrap_or_else(|| GPUOptions_Experimental::default_instance())
    }
    pub fn clear_experimental(&mut self) {
        self.experimental.clear();
    }

    pub fn has_experimental(&self) -> bool {
        self.experimental.is_some()
    }

    // Param is passed by value, moved
    pub fn set_experimental(&mut self, v: GPUOptions_Experimental) {
        self.experimental = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_experimental(&mut self) -> &mut GPUOptions_Experimental {
        if self.experimental.is_none() {
            self.experimental.set_default();
        }
        self.experimental.as_mut().unwrap()
    }

    // Take field
    pub fn take_experimental(&mut self) -> GPUOptions_Experimental {
        self.experimental.take().unwrap_or_else(|| GPUOptions_Experimental::new())
    }
}
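
// Usage note (editorial, not part of the generated output): the `experimental`
// submessage follows the usual rust-protobuf singular-message accessor pattern.
// `get_experimental()` returns a shared default instance when the field is unset,
// `has_experimental()` reports presence, and `mut_experimental()` lazily
// initializes the field before handing out a mutable reference, e.g.:
//
//     let mut opts = GPUOptions::new();
//     assert!(!opts.has_experimental());
//     opts.mut_experimental().set_use_unified_memory(true);
//     assert!(opts.has_experimental());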

impl ::protobuf::Message for GPUOptions {
    fn is_initialized(&self) -> bool {
        for v in &self.experimental {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_double()?;
                    self.per_process_gpu_memory_fraction = tmp;
                },
                4 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.allow_growth = tmp;
                },
                2 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.allocator_type)?;
                },
                3 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.deferred_deletion_bytes = tmp;
                },
                5 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.visible_device_list)?;
                },
                6 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.polling_active_delay_usecs = tmp;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.polling_inactive_delay_msecs = tmp;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.force_gpu_compatible = tmp;
                },
                9 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.per_process_gpu_memory_fraction != 0. {
            my_size += 9;
        }
        if self.allow_growth != false {
            my_size += 2;
        }
        if !self.allocator_type.is_empty() {
            my_size += ::protobuf::rt::string_size(2, &self.allocator_type);
        }
        if self.deferred_deletion_bytes != 0 {
            my_size += ::protobuf::rt::value_size(3, self.deferred_deletion_bytes, ::protobuf::wire_format::WireTypeVarint);
        }
        if !self.visible_device_list.is_empty() {
            my_size += ::protobuf::rt::string_size(5, &self.visible_device_list);
        }
        if self.polling_active_delay_usecs != 0 {
            my_size += ::protobuf::rt::value_size(6, self.polling_active_delay_usecs, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.polling_inactive_delay_msecs != 0 {
            my_size += ::protobuf::rt::value_size(7, self.polling_inactive_delay_msecs, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.force_gpu_compatible != false {
            my_size += 2;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.per_process_gpu_memory_fraction != 0. {
            os.write_double(1, self.per_process_gpu_memory_fraction)?;
        }
        if self.allow_growth != false {
            os.write_bool(4, self.allow_growth)?;
        }
        if !self.allocator_type.is_empty() {
            os.write_string(2, &self.allocator_type)?;
        }
        if self.deferred_deletion_bytes != 0 {
            os.write_int64(3, self.deferred_deletion_bytes)?;
        }
        if !self.visible_device_list.is_empty() {
            os.write_string(5, &self.visible_device_list)?;
        }
        if self.polling_active_delay_usecs != 0 {
            os.write_int32(6, self.polling_active_delay_usecs)?;
        }
        if self.polling_inactive_delay_msecs != 0 {
            os.write_int32(7, self.polling_inactive_delay_msecs)?;
        }
        if self.force_gpu_compatible != false {
            os.write_bool(8, self.force_gpu_compatible)?;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            os.write_tag(9, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> GPUOptions {
        GPUOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeDouble>(
                    "per_process_gpu_memory_fraction",
                    |m: &GPUOptions| { &m.per_process_gpu_memory_fraction },
                    |m: &mut GPUOptions| { &mut m.per_process_gpu_memory_fraction },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "allow_growth",
                    |m: &GPUOptions| { &m.allow_growth },
                    |m: &mut GPUOptions| { &mut m.allow_growth },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "allocator_type",
                    |m: &GPUOptions| { &m.allocator_type },
                    |m: &mut GPUOptions| { &mut m.allocator_type },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "deferred_deletion_bytes",
                    |m: &GPUOptions| { &m.deferred_deletion_bytes },
                    |m: &mut GPUOptions| { &mut m.deferred_deletion_bytes },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "visible_device_list",
                    |m: &GPUOptions| { &m.visible_device_list },
                    |m: &mut GPUOptions| { &mut m.visible_device_list },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "polling_active_delay_usecs",
                    |m: &GPUOptions| { &m.polling_active_delay_usecs },
                    |m: &mut GPUOptions| { &mut m.polling_active_delay_usecs },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "polling_inactive_delay_msecs",
                    |m: &GPUOptions| { &m.polling_inactive_delay_msecs },
                    |m: &mut GPUOptions| { &mut m.polling_inactive_delay_msecs },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "force_gpu_compatible",
                    |m: &GPUOptions| { &m.force_gpu_compatible },
                    |m: &mut GPUOptions| { &mut m.force_gpu_compatible },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions_Experimental>>(
                    "experimental",
                    |m: &GPUOptions| { &m.experimental },
                    |m: &mut GPUOptions| { &mut m.experimental },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions>(
                    "GPUOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static GPUOptions {
        static mut instance: ::protobuf::lazy::Lazy<GPUOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(GPUOptions::new)
        }
    }
}

impl ::protobuf::Clear for GPUOptions {
    fn clear(&mut self) {
        self.per_process_gpu_memory_fraction = 0.;
        self.allow_growth = false;
        self.allocator_type.clear();
        self.deferred_deletion_bytes = 0;
        self.visible_device_list.clear();
        self.polling_active_delay_usecs = 0;
        self.polling_inactive_delay_msecs = 0;
        self.force_gpu_compatible = false;
        self.experimental.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for GPUOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for GPUOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}

#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions_Experimental {
    // message fields
    pub virtual_devices: ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices>,
    pub use_unified_memory: bool,
    pub num_dev_to_dev_copy_streams: i32,
    pub collective_ring_order: ::std::string::String,
    pub timestamped_allocator: bool,
    pub kernel_tracker_max_interval: i32,
    pub kernel_tracker_max_bytes: i32,
    pub kernel_tracker_max_pending: i32,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a GPUOptions_Experimental {
    fn default() -> &'a GPUOptions_Experimental {
        <GPUOptions_Experimental as ::protobuf::Message>::default_instance()
    }
}

impl GPUOptions_Experimental {
    pub fn new() -> GPUOptions_Experimental {
        ::std::default::Default::default()
    }

    // repeated .tensorflow.GPUOptions.Experimental.VirtualDevices virtual_devices = 1;


    pub fn get_virtual_devices(&self) -> &[GPUOptions_Experimental_VirtualDevices] {
        &self.virtual_devices
    }
    pub fn clear_virtual_devices(&mut self) {
        self.virtual_devices.clear();
    }

    // Param is passed by value, moved
    pub fn set_virtual_devices(&mut self, v: ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices>) {
        self.virtual_devices = v;
    }

    // Mutable pointer to the field.
    pub fn mut_virtual_devices(&mut self) -> &mut ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices> {
        &mut self.virtual_devices
    }

    // Take field
    pub fn take_virtual_devices(&mut self) -> ::protobuf::RepeatedField<GPUOptions_Experimental_VirtualDevices> {
        ::std::mem::replace(&mut self.virtual_devices, ::protobuf::RepeatedField::new())
    }

    // bool use_unified_memory = 2;


    pub fn get_use_unified_memory(&self) -> bool {
        self.use_unified_memory
    }
    pub fn clear_use_unified_memory(&mut self) {
        self.use_unified_memory = false;
    }

    // Param is passed by value, moved
    pub fn set_use_unified_memory(&mut self, v: bool) {
        self.use_unified_memory = v;
    }

    // int32 num_dev_to_dev_copy_streams = 3;


    pub fn get_num_dev_to_dev_copy_streams(&self) -> i32 {
        self.num_dev_to_dev_copy_streams
    }
    pub fn clear_num_dev_to_dev_copy_streams(&mut self) {
        self.num_dev_to_dev_copy_streams = 0;
    }

    // Param is passed by value, moved
    pub fn set_num_dev_to_dev_copy_streams(&mut self, v: i32) {
        self.num_dev_to_dev_copy_streams = v;
    }

    // string collective_ring_order = 4;


    pub fn get_collective_ring_order(&self) -> &str {
        &self.collective_ring_order
    }
    pub fn clear_collective_ring_order(&mut self) {
        self.collective_ring_order.clear();
    }

    // Param is passed by value, moved
    pub fn set_collective_ring_order(&mut self, v: ::std::string::String) {
        self.collective_ring_order = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_collective_ring_order(&mut self) -> &mut ::std::string::String {
        &mut self.collective_ring_order
    }

    // Take field
    pub fn take_collective_ring_order(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.collective_ring_order, ::std::string::String::new())
    }

    // bool timestamped_allocator = 5;


    pub fn get_timestamped_allocator(&self) -> bool {
        self.timestamped_allocator
    }
    pub fn clear_timestamped_allocator(&mut self) {
        self.timestamped_allocator = false;
    }

    // Param is passed by value, moved
    pub fn set_timestamped_allocator(&mut self, v: bool) {
        self.timestamped_allocator = v;
    }

    // int32 kernel_tracker_max_interval = 7;


    pub fn get_kernel_tracker_max_interval(&self) -> i32 {
        self.kernel_tracker_max_interval
    }
    pub fn clear_kernel_tracker_max_interval(&mut self) {
        self.kernel_tracker_max_interval = 0;
    }

    // Param is passed by value, moved
    pub fn set_kernel_tracker_max_interval(&mut self, v: i32) {
        self.kernel_tracker_max_interval = v;
    }

    // int32 kernel_tracker_max_bytes = 8;


    pub fn get_kernel_tracker_max_bytes(&self) -> i32 {
        self.kernel_tracker_max_bytes
    }
    pub fn clear_kernel_tracker_max_bytes(&mut self) {
        self.kernel_tracker_max_bytes = 0;
    }

    // Param is passed by value, moved
    pub fn set_kernel_tracker_max_bytes(&mut self, v: i32) {
        self.kernel_tracker_max_bytes = v;
    }

    // int32 kernel_tracker_max_pending = 9;


    pub fn get_kernel_tracker_max_pending(&self) -> i32 {
        self.kernel_tracker_max_pending
    }
    pub fn clear_kernel_tracker_max_pending(&mut self) {
        self.kernel_tracker_max_pending = 0;
    }

    // Param is passed by value, moved
    pub fn set_kernel_tracker_max_pending(&mut self, v: i32) {
        self.kernel_tracker_max_pending = v;
    }
}

impl ::protobuf::Message for GPUOptions_Experimental {
    fn is_initialized(&self) -> bool {
        for v in &self.virtual_devices {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.virtual_devices)?;
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.use_unified_memory = tmp;
                },
                3 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.num_dev_to_dev_copy_streams = tmp;
                },
                4 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.collective_ring_order)?;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.timestamped_allocator = tmp;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.kernel_tracker_max_interval = tmp;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.kernel_tracker_max_bytes = tmp;
                },
                9 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.kernel_tracker_max_pending = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        for value in &self.virtual_devices {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        if self.use_unified_memory != false {
            my_size += 2;
        }
        if self.num_dev_to_dev_copy_streams != 0 {
            my_size += ::protobuf::rt::value_size(3, self.num_dev_to_dev_copy_streams, ::protobuf::wire_format::WireTypeVarint);
        }
        if !self.collective_ring_order.is_empty() {
            my_size += ::protobuf::rt::string_size(4, &self.collective_ring_order);
        }
        if self.timestamped_allocator != false {
            my_size += 2;
        }
        if self.kernel_tracker_max_interval != 0 {
            my_size += ::protobuf::rt::value_size(7, self.kernel_tracker_max_interval, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.kernel_tracker_max_bytes != 0 {
            my_size += ::protobuf::rt::value_size(8, self.kernel_tracker_max_bytes, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.kernel_tracker_max_pending != 0 {
            my_size += ::protobuf::rt::value_size(9, self.kernel_tracker_max_pending, ::protobuf::wire_format::WireTypeVarint);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        for v in &self.virtual_devices {
            os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        if self.use_unified_memory != false {
            os.write_bool(2, self.use_unified_memory)?;
        }
        if self.num_dev_to_dev_copy_streams != 0 {
            os.write_int32(3, self.num_dev_to_dev_copy_streams)?;
        }
        if !self.collective_ring_order.is_empty() {
            os.write_string(4, &self.collective_ring_order)?;
        }
        if self.timestamped_allocator != false {
            os.write_bool(5, self.timestamped_allocator)?;
        }
        if self.kernel_tracker_max_interval != 0 {
            os.write_int32(7, self.kernel_tracker_max_interval)?;
        }
        if self.kernel_tracker_max_bytes != 0 {
            os.write_int32(8, self.kernel_tracker_max_bytes)?;
        }
        if self.kernel_tracker_max_pending != 0 {
            os.write_int32(9, self.kernel_tracker_max_pending)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> GPUOptions_Experimental {
        GPUOptions_Experimental::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions_Experimental_VirtualDevices>>(
                    "virtual_devices",
                    |m: &GPUOptions_Experimental| { &m.virtual_devices },
                    |m: &mut GPUOptions_Experimental| { &mut m.virtual_devices },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "use_unified_memory",
                    |m: &GPUOptions_Experimental| { &m.use_unified_memory },
                    |m: &mut GPUOptions_Experimental| { &mut m.use_unified_memory },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "num_dev_to_dev_copy_streams",
                    |m: &GPUOptions_Experimental| { &m.num_dev_to_dev_copy_streams },
                    |m: &mut GPUOptions_Experimental| { &mut m.num_dev_to_dev_copy_streams },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "collective_ring_order",
                    |m: &GPUOptions_Experimental| { &m.collective_ring_order },
                    |m: &mut GPUOptions_Experimental| { &mut m.collective_ring_order },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "timestamped_allocator",
                    |m: &GPUOptions_Experimental| { &m.timestamped_allocator },
                    |m: &mut GPUOptions_Experimental| { &mut m.timestamped_allocator },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "kernel_tracker_max_interval",
                    |m: &GPUOptions_Experimental| { &m.kernel_tracker_max_interval },
                    |m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_interval },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "kernel_tracker_max_bytes",
                    |m: &GPUOptions_Experimental| { &m.kernel_tracker_max_bytes },
                    |m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_bytes },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "kernel_tracker_max_pending",
                    |m: &GPUOptions_Experimental| { &m.kernel_tracker_max_pending },
                    |m: &mut GPUOptions_Experimental| { &mut m.kernel_tracker_max_pending },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions_Experimental>(
                    "GPUOptions.Experimental",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static GPUOptions_Experimental {
        static mut instance: ::protobuf::lazy::Lazy<GPUOptions_Experimental> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(GPUOptions_Experimental::new)
        }
    }
}

impl ::protobuf::Clear for GPUOptions_Experimental {
    fn clear(&mut self) {
        self.virtual_devices.clear();
        self.use_unified_memory = false;
        self.num_dev_to_dev_copy_streams = 0;
        self.collective_ring_order.clear();
        self.timestamped_allocator = false;
        self.kernel_tracker_max_interval = 0;
        self.kernel_tracker_max_bytes = 0;
        self.kernel_tracker_max_pending = 0;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for GPUOptions_Experimental {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}

#[derive(PartialEq,Clone,Default)]
pub struct GPUOptions_Experimental_VirtualDevices {
    // message fields
    pub memory_limit_mb: ::std::vec::Vec<f32>,
    pub priority: ::std::vec::Vec<i32>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a GPUOptions_Experimental_VirtualDevices {
    fn default() -> &'a GPUOptions_Experimental_VirtualDevices {
        <GPUOptions_Experimental_VirtualDevices as ::protobuf::Message>::default_instance()
    }
}

impl GPUOptions_Experimental_VirtualDevices {
    pub fn new() -> GPUOptions_Experimental_VirtualDevices {
        ::std::default::Default::default()
    }

    // repeated float memory_limit_mb = 1;


    pub fn get_memory_limit_mb(&self) -> &[f32] {
        &self.memory_limit_mb
    }
    pub fn clear_memory_limit_mb(&mut self) {
        self.memory_limit_mb.clear();
    }

    // Param is passed by value, moved
    pub fn set_memory_limit_mb(&mut self, v: ::std::vec::Vec<f32>) {
        self.memory_limit_mb = v;
    }

    // Mutable pointer to the field.
    pub fn mut_memory_limit_mb(&mut self) -> &mut ::std::vec::Vec<f32> {
        &mut self.memory_limit_mb
    }

    // Take field
    pub fn take_memory_limit_mb(&mut self) -> ::std::vec::Vec<f32> {
        ::std::mem::replace(&mut self.memory_limit_mb, ::std::vec::Vec::new())
    }

    // repeated int32 priority = 2;


    pub fn get_priority(&self) -> &[i32] {
        &self.priority
    }
    pub fn clear_priority(&mut self) {
        self.priority.clear();
    }

    // Param is passed by value, moved
    pub fn set_priority(&mut self, v: ::std::vec::Vec<i32>) {
        self.priority = v;
    }

    // Mutable pointer to the field.
    pub fn mut_priority(&mut self) -> &mut ::std::vec::Vec<i32> {
        &mut self.priority
    }

    // Take field
    pub fn take_priority(&mut self) -> ::std::vec::Vec<i32> {
        ::std::mem::replace(&mut self.priority, ::std::vec::Vec::new())
    }
}
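
// Usage note (editorial, not part of the generated output): `memory_limit_mb`
// and `priority` are plain vectors, so virtual devices can be configured by
// pushing into the mutable accessors, e.g.:
//
//     let mut vd = GPUOptions_Experimental_VirtualDevices::new();
//     vd.mut_memory_limit_mb().push(1024.0);
//     vd.mut_priority().push(0);
//     experimental.mut_virtual_devices().push(vd);
//
// where `experimental` is a `GPUOptions_Experimental` value.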

impl ::protobuf::Message for GPUOptions_Experimental_VirtualDevices {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_repeated_float_into(wire_type, is, &mut self.memory_limit_mb)?;
                },
                2 => {
                    ::protobuf::rt::read_repeated_int32_into(wire_type, is, &mut self.priority)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        my_size += 5 * self.memory_limit_mb.len() as u32;
        for value in &self.priority {
            my_size += ::protobuf::rt::value_size(2, *value, ::protobuf::wire_format::WireTypeVarint);
        };
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        for v in &self.memory_limit_mb {
            os.write_float(1, *v)?;
        };
        for v in &self.priority {
            os.write_int32(2, *v)?;
        };
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> GPUOptions_Experimental_VirtualDevices {
        GPUOptions_Experimental_VirtualDevices::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeFloat>(
                    "memory_limit_mb",
                    |m: &GPUOptions_Experimental_VirtualDevices| { &m.memory_limit_mb },
                    |m: &mut GPUOptions_Experimental_VirtualDevices| { &mut m.memory_limit_mb },
                ));
                fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "priority",
                    |m: &GPUOptions_Experimental_VirtualDevices| { &m.priority },
                    |m: &mut GPUOptions_Experimental_VirtualDevices| { &mut m.priority },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<GPUOptions_Experimental_VirtualDevices>(
                    "GPUOptions.Experimental.VirtualDevices",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static GPUOptions_Experimental_VirtualDevices {
        static mut instance: ::protobuf::lazy::Lazy<GPUOptions_Experimental_VirtualDevices> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(GPUOptions_Experimental_VirtualDevices::new)
        }
    }
}

impl ::protobuf::Clear for GPUOptions_Experimental_VirtualDevices {
    fn clear(&mut self) {
        self.memory_limit_mb.clear();
        self.priority.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for GPUOptions_Experimental_VirtualDevices {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for GPUOptions_Experimental_VirtualDevices {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}

#[derive(PartialEq,Clone,Default)]
pub struct OptimizerOptions {
    // message fields
    pub do_common_subexpression_elimination: bool,
    pub do_constant_folding: bool,
    pub max_folded_constant_in_bytes: i64,
    pub do_function_inlining: bool,
    pub opt_level: OptimizerOptions_Level,
    pub global_jit_level: OptimizerOptions_GlobalJitLevel,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a OptimizerOptions {
    fn default() -> &'a OptimizerOptions {
        <OptimizerOptions as ::protobuf::Message>::default_instance()
    }
}

impl OptimizerOptions {
    pub fn new() -> OptimizerOptions {
        ::std::default::Default::default()
    }

    // bool do_common_subexpression_elimination = 1;


    pub fn get_do_common_subexpression_elimination(&self) -> bool {
        self.do_common_subexpression_elimination
    }
    pub fn clear_do_common_subexpression_elimination(&mut self) {
        self.do_common_subexpression_elimination = false;
    }

    // Param is passed by value, moved
    pub fn set_do_common_subexpression_elimination(&mut self, v: bool) {
        self.do_common_subexpression_elimination = v;
    }

    // bool do_constant_folding = 2;


    pub fn get_do_constant_folding(&self) -> bool {
        self.do_constant_folding
    }
    pub fn clear_do_constant_folding(&mut self) {
        self.do_constant_folding = false;
    }

    // Param is passed by value, moved
    pub fn set_do_constant_folding(&mut self, v: bool) {
        self.do_constant_folding = v;
    }

    // int64 max_folded_constant_in_bytes = 6;


    pub fn get_max_folded_constant_in_bytes(&self) -> i64 {
        self.max_folded_constant_in_bytes
    }
    pub fn clear_max_folded_constant_in_bytes(&mut self) {
        self.max_folded_constant_in_bytes = 0;
    }

    // Param is passed by value, moved
    pub fn set_max_folded_constant_in_bytes(&mut self, v: i64) {
        self.max_folded_constant_in_bytes = v;
    }

    // bool do_function_inlining = 4;


    pub fn get_do_function_inlining(&self) -> bool {
        self.do_function_inlining
    }
    pub fn clear_do_function_inlining(&mut self) {
        self.do_function_inlining = false;
    }

    // Param is passed by value, moved
    pub fn set_do_function_inlining(&mut self, v: bool) {
        self.do_function_inlining = v;
    }

    // .tensorflow.OptimizerOptions.Level opt_level = 3;


    pub fn get_opt_level(&self) -> OptimizerOptions_Level {
        self.opt_level
    }
    pub fn clear_opt_level(&mut self) {
        self.opt_level = OptimizerOptions_Level::L1;
    }

    // Param is passed by value, moved
    pub fn set_opt_level(&mut self, v: OptimizerOptions_Level) {
        self.opt_level = v;
    }

    // .tensorflow.OptimizerOptions.GlobalJitLevel global_jit_level = 5;


    pub fn get_global_jit_level(&self) -> OptimizerOptions_GlobalJitLevel {
        self.global_jit_level
    }
    pub fn clear_global_jit_level(&mut self) {
        self.global_jit_level = OptimizerOptions_GlobalJitLevel::DEFAULT;
    }

    // Param is passed by value, moved
    pub fn set_global_jit_level(&mut self, v: OptimizerOptions_GlobalJitLevel) {
        self.global_jit_level = v;
    }
}
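
// Usage note (editorial, not part of the generated output): `opt_level` and
// `global_jit_level` are proto3 enum fields, so their zero-valued variants
// (`OptimizerOptions_Level::L1` and `OptimizerOptions_GlobalJitLevel::DEFAULT`)
// are the defaults and are skipped on the wire; see the `!=` checks in
// `compute_size` and `write_to_with_cached_sizes` below. For example:
//
//     let mut opts = OptimizerOptions::new();
//     opts.set_opt_level(OptimizerOptions_Level::L0);
//     opts.set_global_jit_level(OptimizerOptions_GlobalJitLevel::ON_1);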

impl ::protobuf::Message for OptimizerOptions {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.do_common_subexpression_elimination = tmp;
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.do_constant_folding = tmp;
                },
                6 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.max_folded_constant_in_bytes = tmp;
                },
                4 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.do_function_inlining = tmp;
                },
                3 => {
                    ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.opt_level, 3, &mut self.unknown_fields)?
                },
                5 => {
                    ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.global_jit_level, 5, &mut self.unknown_fields)?
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.do_common_subexpression_elimination != false {
            my_size += 2;
        }
        if self.do_constant_folding != false {
            my_size += 2;
        }
        if self.max_folded_constant_in_bytes != 0 {
            my_size += ::protobuf::rt::value_size(6, self.max_folded_constant_in_bytes, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.do_function_inlining != false {
            my_size += 2;
        }
        if self.opt_level != OptimizerOptions_Level::L1 {
            my_size += ::protobuf::rt::enum_size(3, self.opt_level);
        }
        if self.global_jit_level != OptimizerOptions_GlobalJitLevel::DEFAULT {
            my_size += ::protobuf::rt::enum_size(5, self.global_jit_level);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.do_common_subexpression_elimination != false {
            os.write_bool(1, self.do_common_subexpression_elimination)?;
        }
        if self.do_constant_folding != false {
            os.write_bool(2, self.do_constant_folding)?;
        }
        if self.max_folded_constant_in_bytes != 0 {
            os.write_int64(6, self.max_folded_constant_in_bytes)?;
        }
        if self.do_function_inlining != false {
            os.write_bool(4, self.do_function_inlining)?;
        }
        if self.opt_level != OptimizerOptions_Level::L1 {
            os.write_enum(3, self.opt_level.value())?;
        }
        if self.global_jit_level != OptimizerOptions_GlobalJitLevel::DEFAULT {
            os.write_enum(5, self.global_jit_level.value())?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> OptimizerOptions {
        OptimizerOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "do_common_subexpression_elimination",
                    |m: &OptimizerOptions| { &m.do_common_subexpression_elimination },
                    |m: &mut OptimizerOptions| { &mut m.do_common_subexpression_elimination },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "do_constant_folding",
                    |m: &OptimizerOptions| { &m.do_constant_folding },
                    |m: &mut OptimizerOptions| { &mut m.do_constant_folding },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "max_folded_constant_in_bytes",
                    |m: &OptimizerOptions| { &m.max_folded_constant_in_bytes },
                    |m: &mut OptimizerOptions| { &mut m.max_folded_constant_in_bytes },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "do_function_inlining",
                    |m: &OptimizerOptions| { &m.do_function_inlining },
                    |m: &mut OptimizerOptions| { &mut m.do_function_inlining },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<OptimizerOptions_Level>>(
                    "opt_level",
                    |m: &OptimizerOptions| { &m.opt_level },
                    |m: &mut OptimizerOptions| { &mut m.opt_level },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<OptimizerOptions_GlobalJitLevel>>(
                    "global_jit_level",
                    |m: &OptimizerOptions| { &m.global_jit_level },
                    |m: &mut OptimizerOptions| { &mut m.global_jit_level },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<OptimizerOptions>(
                    "OptimizerOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static OptimizerOptions {
        static mut instance: ::protobuf::lazy::Lazy<OptimizerOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(OptimizerOptions::new)
        }
    }
}

impl ::protobuf::Clear for OptimizerOptions {
    fn clear(&mut self) {
        self.do_common_subexpression_elimination = false;
        self.do_constant_folding = false;
        self.max_folded_constant_in_bytes = 0;
        self.do_function_inlining = false;
        self.opt_level = OptimizerOptions_Level::L1;
        self.global_jit_level = OptimizerOptions_GlobalJitLevel::DEFAULT;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for OptimizerOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for OptimizerOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}

#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum OptimizerOptions_Level {
    L1 = 0,
    L0 = -1,
}

impl ::protobuf::ProtobufEnum for OptimizerOptions_Level {
    fn value(&self) -> i32 {
        *self as i32
    }

    fn from_i32(value: i32) -> ::std::option::Option<OptimizerOptions_Level> {
        match value {
            0 => ::std::option::Option::Some(OptimizerOptions_Level::L1),
            -1 => ::std::option::Option::Some(OptimizerOptions_Level::L0),
            _ => ::std::option::Option::None
        }
    }

    fn values() -> &'static [Self] {
        static values: &'static [OptimizerOptions_Level] = &[
            OptimizerOptions_Level::L1,
            OptimizerOptions_Level::L0,
        ];
        values
    }

    fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                ::protobuf::reflect::EnumDescriptor::new_pb_name::<OptimizerOptions_Level>("OptimizerOptions.Level", file_descriptor_proto())
            })
        }
    }
}

impl ::std::marker::Copy for OptimizerOptions_Level {
}

impl ::std::default::Default for OptimizerOptions_Level {
    fn default() -> Self {
        OptimizerOptions_Level::L1
    }
}

impl ::protobuf::reflect::ProtobufValue for OptimizerOptions_Level {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
    }
}

#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum OptimizerOptions_GlobalJitLevel {
    DEFAULT = 0,
    OFF = -1,
    ON_1 = 1,
    ON_2 = 2,
}

impl ::protobuf::ProtobufEnum for OptimizerOptions_GlobalJitLevel {
    fn value(&self) -> i32 {
        *self as i32
    }

    fn from_i32(value: i32) -> ::std::option::Option<OptimizerOptions_GlobalJitLevel> {
        match value {
            0 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::DEFAULT),
            -1 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::OFF),
            1 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::ON_1),
            2 => ::std::option::Option::Some(OptimizerOptions_GlobalJitLevel::ON_2),
            _ => ::std::option::Option::None
        }
    }

    fn values() -> &'static [Self] {
        static values: &'static [OptimizerOptions_GlobalJitLevel] = &[
            OptimizerOptions_GlobalJitLevel::DEFAULT,
            OptimizerOptions_GlobalJitLevel::OFF,
            OptimizerOptions_GlobalJitLevel::ON_1,
            OptimizerOptions_GlobalJitLevel::ON_2,
        ];
        values
    }

    fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                ::protobuf::reflect::EnumDescriptor::new_pb_name::<OptimizerOptions_GlobalJitLevel>("OptimizerOptions.GlobalJitLevel", file_descriptor_proto())
            })
        }
    }
}

impl ::std::marker::Copy for OptimizerOptions_GlobalJitLevel {
}

impl ::std::default::Default for OptimizerOptions_GlobalJitLevel {
    fn default() -> Self {
        OptimizerOptions_GlobalJitLevel::DEFAULT
    }
}

impl ::protobuf::reflect::ProtobufValue for OptimizerOptions_GlobalJitLevel {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
    }
}
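
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated code): shows how the
// optimizer-related setters generated above might be used from client code.
// The function name `example_optimizer_options` and all field values are made
// up for illustration.
#[allow(dead_code)]
fn example_optimizer_options() -> OptimizerOptions {
    let mut opts = OptimizerOptions::new();
    // Plain scalar fields have get_/set_/clear_ accessors.
    opts.set_do_constant_folding(true);
    opts.set_max_folded_constant_in_bytes(10 * 1024 * 1024);
    // Enum fields take the generated enum types defined above.
    opts.set_opt_level(OptimizerOptions_Level::L1);
    opts.set_global_jit_level(OptimizerOptions_GlobalJitLevel::ON_1);
    opts
}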

#[derive(PartialEq,Clone,Default)]
pub struct GraphOptions {
    // message fields
    pub enable_recv_scheduling: bool,
    pub optimizer_options: ::protobuf::SingularPtrField<OptimizerOptions>,
    pub build_cost_model: i64,
    pub build_cost_model_after: i64,
    pub infer_shapes: bool,
    pub place_pruned_graph: bool,
    pub enable_bfloat16_sendrecv: bool,
    pub timeline_step: i32,
    pub rewrite_options: ::protobuf::SingularPtrField<super::rewriter_config::RewriterConfig>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a GraphOptions {
    fn default() -> &'a GraphOptions {
        <GraphOptions as ::protobuf::Message>::default_instance()
    }
}

impl GraphOptions {
    pub fn new() -> GraphOptions {
        ::std::default::Default::default()
    }

    // bool enable_recv_scheduling = 2;


    pub fn get_enable_recv_scheduling(&self) -> bool {
        self.enable_recv_scheduling
    }
    pub fn clear_enable_recv_scheduling(&mut self) {
        self.enable_recv_scheduling = false;
    }

    // Param is passed by value, moved
    pub fn set_enable_recv_scheduling(&mut self, v: bool) {
        self.enable_recv_scheduling = v;
    }

    // .tensorflow.OptimizerOptions optimizer_options = 3;


    pub fn get_optimizer_options(&self) -> &OptimizerOptions {
        self.optimizer_options.as_ref().unwrap_or_else(|| OptimizerOptions::default_instance())
    }
    pub fn clear_optimizer_options(&mut self) {
        self.optimizer_options.clear();
    }

    pub fn has_optimizer_options(&self) -> bool {
        self.optimizer_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_optimizer_options(&mut self, v: OptimizerOptions) {
        self.optimizer_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_optimizer_options(&mut self) -> &mut OptimizerOptions {
        if self.optimizer_options.is_none() {
            self.optimizer_options.set_default();
        }
        self.optimizer_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_optimizer_options(&mut self) -> OptimizerOptions {
        self.optimizer_options.take().unwrap_or_else(|| OptimizerOptions::new())
    }

    // int64 build_cost_model = 4;


    pub fn get_build_cost_model(&self) -> i64 {
        self.build_cost_model
    }
    pub fn clear_build_cost_model(&mut self) {
        self.build_cost_model = 0;
    }

    // Param is passed by value, moved
    pub fn set_build_cost_model(&mut self, v: i64) {
        self.build_cost_model = v;
    }

    // int64 build_cost_model_after = 9;


    pub fn get_build_cost_model_after(&self) -> i64 {
        self.build_cost_model_after
    }
    pub fn clear_build_cost_model_after(&mut self) {
        self.build_cost_model_after = 0;
    }

    // Param is passed by value, moved
    pub fn set_build_cost_model_after(&mut self, v: i64) {
        self.build_cost_model_after = v;
    }

    // bool infer_shapes = 5;


    pub fn get_infer_shapes(&self) -> bool {
        self.infer_shapes
    }
    pub fn clear_infer_shapes(&mut self) {
        self.infer_shapes = false;
    }

    // Param is passed by value, moved
    pub fn set_infer_shapes(&mut self, v: bool) {
        self.infer_shapes = v;
    }

    // bool place_pruned_graph = 6;


    pub fn get_place_pruned_graph(&self) -> bool {
        self.place_pruned_graph
    }
    pub fn clear_place_pruned_graph(&mut self) {
        self.place_pruned_graph = false;
    }

    // Param is passed by value, moved
    pub fn set_place_pruned_graph(&mut self, v: bool) {
        self.place_pruned_graph = v;
    }

    // bool enable_bfloat16_sendrecv = 7;


    pub fn get_enable_bfloat16_sendrecv(&self) -> bool {
        self.enable_bfloat16_sendrecv
    }
    pub fn clear_enable_bfloat16_sendrecv(&mut self) {
        self.enable_bfloat16_sendrecv = false;
    }

    // Param is passed by value, moved
    pub fn set_enable_bfloat16_sendrecv(&mut self, v: bool) {
        self.enable_bfloat16_sendrecv = v;
    }

    // int32 timeline_step = 8;


    pub fn get_timeline_step(&self) -> i32 {
        self.timeline_step
    }
    pub fn clear_timeline_step(&mut self) {
        self.timeline_step = 0;
    }

    // Param is passed by value, moved
    pub fn set_timeline_step(&mut self, v: i32) {
        self.timeline_step = v;
    }

    // .tensorflow.RewriterConfig rewrite_options = 10;


    pub fn get_rewrite_options(&self) -> &super::rewriter_config::RewriterConfig {
        self.rewrite_options.as_ref().unwrap_or_else(|| super::rewriter_config::RewriterConfig::default_instance())
    }
    pub fn clear_rewrite_options(&mut self) {
        self.rewrite_options.clear();
    }

    pub fn has_rewrite_options(&self) -> bool {
        self.rewrite_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_rewrite_options(&mut self, v: super::rewriter_config::RewriterConfig) {
        self.rewrite_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_rewrite_options(&mut self) -> &mut super::rewriter_config::RewriterConfig {
        if self.rewrite_options.is_none() {
            self.rewrite_options.set_default();
        }
        self.rewrite_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_rewrite_options(&mut self) -> super::rewriter_config::RewriterConfig {
        self.rewrite_options.take().unwrap_or_else(|| super::rewriter_config::RewriterConfig::new())
    }
}

impl ::protobuf::Message for GraphOptions {
    fn is_initialized(&self) -> bool {
        for v in &self.optimizer_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.rewrite_options {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.enable_recv_scheduling = tmp;
                },
                3 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.optimizer_options)?;
                },
                4 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.build_cost_model = tmp;
                },
                9 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.build_cost_model_after = tmp;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.infer_shapes = tmp;
                },
                6 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.place_pruned_graph = tmp;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.enable_bfloat16_sendrecv = tmp;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.timeline_step = tmp;
                },
                10 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.rewrite_options)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.enable_recv_scheduling != false {
            my_size += 2;
        }
        if let Some(ref v) = self.optimizer_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.build_cost_model != 0 {
            my_size += ::protobuf::rt::value_size(4, self.build_cost_model, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.build_cost_model_after != 0 {
            my_size += ::protobuf::rt::value_size(9, self.build_cost_model_after, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.infer_shapes != false {
            my_size += 2;
        }
        if self.place_pruned_graph != false {
            my_size += 2;
        }
        if self.enable_bfloat16_sendrecv != false {
            my_size += 2;
        }
        if self.timeline_step != 0 {
            my_size += ::protobuf::rt::value_size(8, self.timeline_step, ::protobuf::wire_format::WireTypeVarint);
        }
        if let Some(ref v) = self.rewrite_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.enable_recv_scheduling != false {
            os.write_bool(2, self.enable_recv_scheduling)?;
        }
        if let Some(ref v) = self.optimizer_options.as_ref() {
            os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.build_cost_model != 0 {
            os.write_int64(4, self.build_cost_model)?;
        }
        if self.build_cost_model_after != 0 {
            os.write_int64(9, self.build_cost_model_after)?;
        }
        if self.infer_shapes != false {
            os.write_bool(5, self.infer_shapes)?;
        }
        if self.place_pruned_graph != false {
            os.write_bool(6, self.place_pruned_graph)?;
        }
        if self.enable_bfloat16_sendrecv != false {
            os.write_bool(7, self.enable_bfloat16_sendrecv)?;
        }
        if self.timeline_step != 0 {
            os.write_int32(8, self.timeline_step)?;
        }
        if let Some(ref v) = self.rewrite_options.as_ref() {
            os.write_tag(10, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> GraphOptions {
        GraphOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "enable_recv_scheduling",
                    |m: &GraphOptions| { &m.enable_recv_scheduling },
                    |m: &mut GraphOptions| { &mut m.enable_recv_scheduling },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<OptimizerOptions>>(
                    "optimizer_options",
                    |m: &GraphOptions| { &m.optimizer_options },
                    |m: &mut GraphOptions| { &mut m.optimizer_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "build_cost_model",
                    |m: &GraphOptions| { &m.build_cost_model },
                    |m: &mut GraphOptions| { &mut m.build_cost_model },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "build_cost_model_after",
                    |m: &GraphOptions| { &m.build_cost_model_after },
                    |m: &mut GraphOptions| { &mut m.build_cost_model_after },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "infer_shapes",
                    |m: &GraphOptions| { &m.infer_shapes },
                    |m: &mut GraphOptions| { &mut m.infer_shapes },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "place_pruned_graph",
                    |m: &GraphOptions| { &m.place_pruned_graph },
                    |m: &mut GraphOptions| { &mut m.place_pruned_graph },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "enable_bfloat16_sendrecv",
                    |m: &GraphOptions| { &m.enable_bfloat16_sendrecv },
                    |m: &mut GraphOptions| { &mut m.enable_bfloat16_sendrecv },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "timeline_step",
                    |m: &GraphOptions| { &m.timeline_step },
                    |m: &mut GraphOptions| { &mut m.timeline_step },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::rewriter_config::RewriterConfig>>(
                    "rewrite_options",
                    |m: &GraphOptions| { &m.rewrite_options },
                    |m: &mut GraphOptions| { &mut m.rewrite_options },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<GraphOptions>(
                    "GraphOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static GraphOptions {
        static mut instance: ::protobuf::lazy::Lazy<GraphOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(GraphOptions::new)
        }
    }
}

impl ::protobuf::Clear for GraphOptions {
    fn clear(&mut self) {
        self.enable_recv_scheduling = false;
        self.optimizer_options.clear();
        self.build_cost_model = 0;
        self.build_cost_model_after = 0;
        self.infer_shapes = false;
        self.place_pruned_graph = false;
        self.enable_bfloat16_sendrecv = false;
        self.timeline_step = 0;
        self.rewrite_options.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for GraphOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for GraphOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
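
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated code): builds a
// GraphOptions value with the accessors above. `example_graph_options` is a
// made-up name; the field values are arbitrary.
#[allow(dead_code)]
fn example_graph_options() -> GraphOptions {
    let mut graph_opts = GraphOptions::new();
    graph_opts.set_infer_shapes(true);
    graph_opts.set_build_cost_model(100);
    // Singular message fields are lazily initialized by the mut_* accessors,
    // so there is no need to call set_optimizer_options first.
    graph_opts.mut_optimizer_options().set_do_function_inlining(true);
    assert!(graph_opts.has_optimizer_options());
    graph_opts
}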

#[derive(PartialEq,Clone,Default)]
pub struct ThreadPoolOptionProto {
    // message fields
    pub num_threads: i32,
    pub global_name: ::std::string::String,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a ThreadPoolOptionProto {
    fn default() -> &'a ThreadPoolOptionProto {
        <ThreadPoolOptionProto as ::protobuf::Message>::default_instance()
    }
}

impl ThreadPoolOptionProto {
    pub fn new() -> ThreadPoolOptionProto {
        ::std::default::Default::default()
    }

    // int32 num_threads = 1;


    pub fn get_num_threads(&self) -> i32 {
        self.num_threads
    }
    pub fn clear_num_threads(&mut self) {
        self.num_threads = 0;
    }

    // Param is passed by value, moved
    pub fn set_num_threads(&mut self, v: i32) {
        self.num_threads = v;
    }

    // string global_name = 2;


    pub fn get_global_name(&self) -> &str {
        &self.global_name
    }
    pub fn clear_global_name(&mut self) {
        self.global_name.clear();
    }

    // Param is passed by value, moved
    pub fn set_global_name(&mut self, v: ::std::string::String) {
        self.global_name = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_global_name(&mut self) -> &mut ::std::string::String {
        &mut self.global_name
    }

    // Take field
    pub fn take_global_name(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.global_name, ::std::string::String::new())
    }
}

impl ::protobuf::Message for ThreadPoolOptionProto {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.num_threads = tmp;
                },
                2 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.global_name)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.num_threads != 0 {
            my_size += ::protobuf::rt::value_size(1, self.num_threads, ::protobuf::wire_format::WireTypeVarint);
        }
        if !self.global_name.is_empty() {
            my_size += ::protobuf::rt::string_size(2, &self.global_name);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.num_threads != 0 {
            os.write_int32(1, self.num_threads)?;
        }
        if !self.global_name.is_empty() {
            os.write_string(2, &self.global_name)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> ThreadPoolOptionProto {
        ThreadPoolOptionProto::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "num_threads",
                    |m: &ThreadPoolOptionProto| { &m.num_threads },
                    |m: &mut ThreadPoolOptionProto| { &mut m.num_threads },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "global_name",
                    |m: &ThreadPoolOptionProto| { &m.global_name },
                    |m: &mut ThreadPoolOptionProto| { &mut m.global_name },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<ThreadPoolOptionProto>(
                    "ThreadPoolOptionProto",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static ThreadPoolOptionProto {
        static mut instance: ::protobuf::lazy::Lazy<ThreadPoolOptionProto> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(ThreadPoolOptionProto::new)
        }
    }
}

impl ::protobuf::Clear for ThreadPoolOptionProto {
    fn clear(&mut self) {
        self.num_threads = 0;
        self.global_name.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for ThreadPoolOptionProto {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for ThreadPoolOptionProto {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
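
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated code): configures a
// ThreadPoolOptionProto as it might appear in ConfigProto's
// session_inter_op_thread_pool list. `example_thread_pool` is a made-up name.
#[allow(dead_code)]
fn example_thread_pool() -> ThreadPoolOptionProto {
    let mut pool = ThreadPoolOptionProto::new();
    pool.set_num_threads(4);
    // String fields take owned Strings; take_global_name would move the value
    // back out, leaving an empty string behind.
    pool.set_global_name("shared_inter_op_pool".to_string());
    pool
}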

#[derive(PartialEq,Clone,Default)]
pub struct RPCOptions {
    // message fields
    pub use_rpc_for_inprocess_master: bool,
    pub compression_algorithm: ::std::string::String,
    pub compression_level: i32,
    pub cache_rpc_response: bool,
    pub disable_session_connection_sharing: bool,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RPCOptions {
    fn default() -> &'a RPCOptions {
        <RPCOptions as ::protobuf::Message>::default_instance()
    }
}

impl RPCOptions {
    pub fn new() -> RPCOptions {
        ::std::default::Default::default()
    }

    // bool use_rpc_for_inprocess_master = 1;


    pub fn get_use_rpc_for_inprocess_master(&self) -> bool {
        self.use_rpc_for_inprocess_master
    }
    pub fn clear_use_rpc_for_inprocess_master(&mut self) {
        self.use_rpc_for_inprocess_master = false;
    }

    // Param is passed by value, moved
    pub fn set_use_rpc_for_inprocess_master(&mut self, v: bool) {
        self.use_rpc_for_inprocess_master = v;
    }

    // string compression_algorithm = 2;


    pub fn get_compression_algorithm(&self) -> &str {
        &self.compression_algorithm
    }
    pub fn clear_compression_algorithm(&mut self) {
        self.compression_algorithm.clear();
    }

    // Param is passed by value, moved
    pub fn set_compression_algorithm(&mut self, v: ::std::string::String) {
        self.compression_algorithm = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_compression_algorithm(&mut self) -> &mut ::std::string::String {
        &mut self.compression_algorithm
    }

    // Take field
    pub fn take_compression_algorithm(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.compression_algorithm, ::std::string::String::new())
    }

    // int32 compression_level = 3;


    pub fn get_compression_level(&self) -> i32 {
        self.compression_level
    }
    pub fn clear_compression_level(&mut self) {
        self.compression_level = 0;
    }

    // Param is passed by value, moved
    pub fn set_compression_level(&mut self, v: i32) {
        self.compression_level = v;
    }

    // bool cache_rpc_response = 4;


    pub fn get_cache_rpc_response(&self) -> bool {
        self.cache_rpc_response
    }
    pub fn clear_cache_rpc_response(&mut self) {
        self.cache_rpc_response = false;
    }

    // Param is passed by value, moved
    pub fn set_cache_rpc_response(&mut self, v: bool) {
        self.cache_rpc_response = v;
    }

    // bool disable_session_connection_sharing = 5;


    pub fn get_disable_session_connection_sharing(&self) -> bool {
        self.disable_session_connection_sharing
    }
    pub fn clear_disable_session_connection_sharing(&mut self) {
        self.disable_session_connection_sharing = false;
    }

    // Param is passed by value, moved
    pub fn set_disable_session_connection_sharing(&mut self, v: bool) {
        self.disable_session_connection_sharing = v;
    }
}

impl ::protobuf::Message for RPCOptions {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.use_rpc_for_inprocess_master = tmp;
                },
                2 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.compression_algorithm)?;
                },
                3 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.compression_level = tmp;
                },
                4 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.cache_rpc_response = tmp;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.disable_session_connection_sharing = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.use_rpc_for_inprocess_master != false {
            my_size += 2;
        }
        if !self.compression_algorithm.is_empty() {
            my_size += ::protobuf::rt::string_size(2, &self.compression_algorithm);
        }
        if self.compression_level != 0 {
            my_size += ::protobuf::rt::value_size(3, self.compression_level, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.cache_rpc_response != false {
            my_size += 2;
        }
        if self.disable_session_connection_sharing != false {
            my_size += 2;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.use_rpc_for_inprocess_master != false {
            os.write_bool(1, self.use_rpc_for_inprocess_master)?;
        }
        if !self.compression_algorithm.is_empty() {
            os.write_string(2, &self.compression_algorithm)?;
        }
        if self.compression_level != 0 {
            os.write_int32(3, self.compression_level)?;
        }
        if self.cache_rpc_response != false {
            os.write_bool(4, self.cache_rpc_response)?;
        }
        if self.disable_session_connection_sharing != false {
            os.write_bool(5, self.disable_session_connection_sharing)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RPCOptions {
        RPCOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "use_rpc_for_inprocess_master",
                    |m: &RPCOptions| { &m.use_rpc_for_inprocess_master },
                    |m: &mut RPCOptions| { &mut m.use_rpc_for_inprocess_master },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "compression_algorithm",
                    |m: &RPCOptions| { &m.compression_algorithm },
                    |m: &mut RPCOptions| { &mut m.compression_algorithm },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "compression_level",
                    |m: &RPCOptions| { &m.compression_level },
                    |m: &mut RPCOptions| { &mut m.compression_level },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "cache_rpc_response",
                    |m: &RPCOptions| { &m.cache_rpc_response },
                    |m: &mut RPCOptions| { &mut m.cache_rpc_response },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "disable_session_connection_sharing",
                    |m: &RPCOptions| { &m.disable_session_connection_sharing },
                    |m: &mut RPCOptions| { &mut m.disable_session_connection_sharing },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RPCOptions>(
                    "RPCOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RPCOptions {
        static mut instance: ::protobuf::lazy::Lazy<RPCOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RPCOptions::new)
        }
    }
}

impl ::protobuf::Clear for RPCOptions {
    fn clear(&mut self) {
        self.use_rpc_for_inprocess_master = false;
        self.compression_algorithm.clear();
        self.compression_level = 0;
        self.cache_rpc_response = false;
        self.disable_session_connection_sharing = false;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RPCOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RPCOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
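
// ---------------------------------------------------------------------------
// Illustrative round-trip sketch (not part of the generated code): serializes
// an RPCOptions message and parses it back with the protobuf 2.x runtime.
// `example_rpc_options_roundtrip` is a made-up name.
#[allow(dead_code)]
fn example_rpc_options_roundtrip() -> ::protobuf::ProtobufResult<RPCOptions> {
    let mut rpc = RPCOptions::new();
    rpc.set_compression_algorithm("gzip".to_string());
    rpc.set_compression_level(2);
    // write_to_bytes and parse_from_bytes come from the protobuf runtime and
    // work for any generated Message type.
    let bytes = ::protobuf::Message::write_to_bytes(&rpc)?;
    ::protobuf::parse_from_bytes::<RPCOptions>(&bytes)
}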

#[derive(PartialEq,Clone,Default)]
pub struct SessionMetadata {
    // message fields
    pub name: ::std::string::String,
    pub version: i64,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a SessionMetadata {
    fn default() -> &'a SessionMetadata {
        <SessionMetadata as ::protobuf::Message>::default_instance()
    }
}

impl SessionMetadata {
    pub fn new() -> SessionMetadata {
        ::std::default::Default::default()
    }

    // string name = 1;


    pub fn get_name(&self) -> &str {
        &self.name
    }
    pub fn clear_name(&mut self) {
        self.name.clear();
    }

    // Param is passed by value, moved
    pub fn set_name(&mut self, v: ::std::string::String) {
        self.name = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_name(&mut self) -> &mut ::std::string::String {
        &mut self.name
    }

    // Take field
    pub fn take_name(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.name, ::std::string::String::new())
    }

    // int64 version = 2;


    pub fn get_version(&self) -> i64 {
        self.version
    }
    pub fn clear_version(&mut self) {
        self.version = 0;
    }

    // Param is passed by value, moved
    pub fn set_version(&mut self, v: i64) {
        self.version = v;
    }
}

impl ::protobuf::Message for SessionMetadata {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.name)?;
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.version = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if !self.name.is_empty() {
            my_size += ::protobuf::rt::string_size(1, &self.name);
        }
        if self.version != 0 {
            my_size += ::protobuf::rt::value_size(2, self.version, ::protobuf::wire_format::WireTypeVarint);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if !self.name.is_empty() {
            os.write_string(1, &self.name)?;
        }
        if self.version != 0 {
            os.write_int64(2, self.version)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> SessionMetadata {
        SessionMetadata::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "name",
                    |m: &SessionMetadata| { &m.name },
                    |m: &mut SessionMetadata| { &mut m.name },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "version",
                    |m: &SessionMetadata| { &m.version },
                    |m: &mut SessionMetadata| { &mut m.version },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<SessionMetadata>(
                    "SessionMetadata",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static SessionMetadata {
        static mut instance: ::protobuf::lazy::Lazy<SessionMetadata> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(SessionMetadata::new)
        }
    }
}

impl ::protobuf::Clear for SessionMetadata {
    fn clear(&mut self) {
        self.name.clear();
        self.version = 0;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for SessionMetadata {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for SessionMetadata {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
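
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated code): the Debug impl
// above renders SessionMetadata in protobuf text format, which can be handy
// for logging. `example_session_metadata` is a made-up name.
#[allow(dead_code)]
fn example_session_metadata() -> ::std::string::String {
    let mut meta = SessionMetadata::new();
    meta.set_name("serving_session".to_string());
    meta.set_version(3);
    format!("{:?}", meta)
}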

#[derive(PartialEq,Clone,Default)]
pub struct ConfigProto {
    // message fields
    pub device_count: ::std::collections::HashMap<::std::string::String, i32>,
    pub intra_op_parallelism_threads: i32,
    pub inter_op_parallelism_threads: i32,
    pub use_per_session_threads: bool,
    pub session_inter_op_thread_pool: ::protobuf::RepeatedField<ThreadPoolOptionProto>,
    pub placement_period: i32,
    pub device_filters: ::protobuf::RepeatedField<::std::string::String>,
    pub gpu_options: ::protobuf::SingularPtrField<GPUOptions>,
    pub allow_soft_placement: bool,
    pub log_device_placement: bool,
    pub graph_options: ::protobuf::SingularPtrField<GraphOptions>,
    pub operation_timeout_in_ms: i64,
    pub rpc_options: ::protobuf::SingularPtrField<RPCOptions>,
    pub cluster_def: ::protobuf::SingularPtrField<super::cluster::ClusterDef>,
    pub isolate_session_state: bool,
    pub share_cluster_devices_in_session: bool,
    pub experimental: ::protobuf::SingularPtrField<ConfigProto_Experimental>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a ConfigProto {
    fn default() -> &'a ConfigProto {
        <ConfigProto as ::protobuf::Message>::default_instance()
    }
}

impl ConfigProto {
    pub fn new() -> ConfigProto {
        ::std::default::Default::default()
    }

    // repeated .tensorflow.ConfigProto.DeviceCountEntry device_count = 1;


    pub fn get_device_count(&self) -> &::std::collections::HashMap<::std::string::String, i32> {
        &self.device_count
    }
    pub fn clear_device_count(&mut self) {
        self.device_count.clear();
    }

    // Param is passed by value, moved
    pub fn set_device_count(&mut self, v: ::std::collections::HashMap<::std::string::String, i32>) {
        self.device_count = v;
    }

    // Mutable pointer to the field.
    pub fn mut_device_count(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, i32> {
        &mut self.device_count
    }

    // Take field
    pub fn take_device_count(&mut self) -> ::std::collections::HashMap<::std::string::String, i32> {
        ::std::mem::replace(&mut self.device_count, ::std::collections::HashMap::new())
    }

    // int32 intra_op_parallelism_threads = 2;


    pub fn get_intra_op_parallelism_threads(&self) -> i32 {
        self.intra_op_parallelism_threads
    }
    pub fn clear_intra_op_parallelism_threads(&mut self) {
        self.intra_op_parallelism_threads = 0;
    }

    // Param is passed by value, moved
    pub fn set_intra_op_parallelism_threads(&mut self, v: i32) {
        self.intra_op_parallelism_threads = v;
    }

    // int32 inter_op_parallelism_threads = 5;


    pub fn get_inter_op_parallelism_threads(&self) -> i32 {
        self.inter_op_parallelism_threads
    }
    pub fn clear_inter_op_parallelism_threads(&mut self) {
        self.inter_op_parallelism_threads = 0;
    }

    // Param is passed by value, moved
    pub fn set_inter_op_parallelism_threads(&mut self, v: i32) {
        self.inter_op_parallelism_threads = v;
    }

    // bool use_per_session_threads = 9;


    pub fn get_use_per_session_threads(&self) -> bool {
        self.use_per_session_threads
    }
    pub fn clear_use_per_session_threads(&mut self) {
        self.use_per_session_threads = false;
    }

    // Param is passed by value, moved
    pub fn set_use_per_session_threads(&mut self, v: bool) {
        self.use_per_session_threads = v;
    }

    // repeated .tensorflow.ThreadPoolOptionProto session_inter_op_thread_pool = 12;


    pub fn get_session_inter_op_thread_pool(&self) -> &[ThreadPoolOptionProto] {
        &self.session_inter_op_thread_pool
    }
    pub fn clear_session_inter_op_thread_pool(&mut self) {
        self.session_inter_op_thread_pool.clear();
    }

    // Param is passed by value, moved
    pub fn set_session_inter_op_thread_pool(&mut self, v: ::protobuf::RepeatedField<ThreadPoolOptionProto>) {
        self.session_inter_op_thread_pool = v;
    }

    // Mutable pointer to the field.
    pub fn mut_session_inter_op_thread_pool(&mut self) -> &mut ::protobuf::RepeatedField<ThreadPoolOptionProto> {
        &mut self.session_inter_op_thread_pool
    }

    // Take field
    pub fn take_session_inter_op_thread_pool(&mut self) -> ::protobuf::RepeatedField<ThreadPoolOptionProto> {
        ::std::mem::replace(&mut self.session_inter_op_thread_pool, ::protobuf::RepeatedField::new())
    }

    // int32 placement_period = 3;


    pub fn get_placement_period(&self) -> i32 {
        self.placement_period
    }
    pub fn clear_placement_period(&mut self) {
        self.placement_period = 0;
    }

    // Param is passed by value, moved
    pub fn set_placement_period(&mut self, v: i32) {
        self.placement_period = v;
    }

    // repeated string device_filters = 4;


    pub fn get_device_filters(&self) -> &[::std::string::String] {
        &self.device_filters
    }
    pub fn clear_device_filters(&mut self) {
        self.device_filters.clear();
    }

    // Param is passed by value, moved
    pub fn set_device_filters(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
        self.device_filters = v;
    }

    // Mutable pointer to the field.
    pub fn mut_device_filters(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
        &mut self.device_filters
    }

    // Take field
    pub fn take_device_filters(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
        ::std::mem::replace(&mut self.device_filters, ::protobuf::RepeatedField::new())
    }

    // .tensorflow.GPUOptions gpu_options = 6;


    pub fn get_gpu_options(&self) -> &GPUOptions {
        self.gpu_options.as_ref().unwrap_or_else(|| GPUOptions::default_instance())
    }
    pub fn clear_gpu_options(&mut self) {
        self.gpu_options.clear();
    }

    pub fn has_gpu_options(&self) -> bool {
        self.gpu_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_gpu_options(&mut self, v: GPUOptions) {
        self.gpu_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_gpu_options(&mut self) -> &mut GPUOptions {
        if self.gpu_options.is_none() {
            self.gpu_options.set_default();
        }
        self.gpu_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_gpu_options(&mut self) -> GPUOptions {
        self.gpu_options.take().unwrap_or_else(|| GPUOptions::new())
    }

    // bool allow_soft_placement = 7;


    pub fn get_allow_soft_placement(&self) -> bool {
        self.allow_soft_placement
    }
    pub fn clear_allow_soft_placement(&mut self) {
        self.allow_soft_placement = false;
    }

    // Param is passed by value, moved
    pub fn set_allow_soft_placement(&mut self, v: bool) {
        self.allow_soft_placement = v;
    }

    // bool log_device_placement = 8;


    pub fn get_log_device_placement(&self) -> bool {
        self.log_device_placement
    }
    pub fn clear_log_device_placement(&mut self) {
        self.log_device_placement = false;
    }

    // Param is passed by value, moved
    pub fn set_log_device_placement(&mut self, v: bool) {
        self.log_device_placement = v;
    }

    // .tensorflow.GraphOptions graph_options = 10;


    pub fn get_graph_options(&self) -> &GraphOptions {
        self.graph_options.as_ref().unwrap_or_else(|| GraphOptions::default_instance())
    }
    pub fn clear_graph_options(&mut self) {
        self.graph_options.clear();
    }

    pub fn has_graph_options(&self) -> bool {
        self.graph_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_graph_options(&mut self, v: GraphOptions) {
        self.graph_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_graph_options(&mut self) -> &mut GraphOptions {
        if self.graph_options.is_none() {
            self.graph_options.set_default();
        }
        self.graph_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_graph_options(&mut self) -> GraphOptions {
        self.graph_options.take().unwrap_or_else(|| GraphOptions::new())
    }

    // int64 operation_timeout_in_ms = 11;


    pub fn get_operation_timeout_in_ms(&self) -> i64 {
        self.operation_timeout_in_ms
    }
    pub fn clear_operation_timeout_in_ms(&mut self) {
        self.operation_timeout_in_ms = 0;
    }

    // Param is passed by value, moved
    pub fn set_operation_timeout_in_ms(&mut self, v: i64) {
        self.operation_timeout_in_ms = v;
    }

    // .tensorflow.RPCOptions rpc_options = 13;


    pub fn get_rpc_options(&self) -> &RPCOptions {
        self.rpc_options.as_ref().unwrap_or_else(|| RPCOptions::default_instance())
    }
    pub fn clear_rpc_options(&mut self) {
        self.rpc_options.clear();
    }

    pub fn has_rpc_options(&self) -> bool {
        self.rpc_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_rpc_options(&mut self, v: RPCOptions) {
        self.rpc_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_rpc_options(&mut self) -> &mut RPCOptions {
        if self.rpc_options.is_none() {
            self.rpc_options.set_default();
        }
        self.rpc_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_rpc_options(&mut self) -> RPCOptions {
        self.rpc_options.take().unwrap_or_else(|| RPCOptions::new())
    }

    // .tensorflow.ClusterDef cluster_def = 14;


    pub fn get_cluster_def(&self) -> &super::cluster::ClusterDef {
        self.cluster_def.as_ref().unwrap_or_else(|| super::cluster::ClusterDef::default_instance())
    }
    pub fn clear_cluster_def(&mut self) {
        self.cluster_def.clear();
    }

    pub fn has_cluster_def(&self) -> bool {
        self.cluster_def.is_some()
    }

    // Param is passed by value, moved
    pub fn set_cluster_def(&mut self, v: super::cluster::ClusterDef) {
        self.cluster_def = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_cluster_def(&mut self) -> &mut super::cluster::ClusterDef {
        if self.cluster_def.is_none() {
            self.cluster_def.set_default();
        }
        self.cluster_def.as_mut().unwrap()
    }

    // Take field
    pub fn take_cluster_def(&mut self) -> super::cluster::ClusterDef {
        self.cluster_def.take().unwrap_or_else(|| super::cluster::ClusterDef::new())
    }

    // bool isolate_session_state = 15;


    pub fn get_isolate_session_state(&self) -> bool {
        self.isolate_session_state
    }
    pub fn clear_isolate_session_state(&mut self) {
        self.isolate_session_state = false;
    }

    // Param is passed by value, moved
    pub fn set_isolate_session_state(&mut self, v: bool) {
        self.isolate_session_state = v;
    }

    // bool share_cluster_devices_in_session = 17;


    pub fn get_share_cluster_devices_in_session(&self) -> bool {
        self.share_cluster_devices_in_session
    }
    pub fn clear_share_cluster_devices_in_session(&mut self) {
        self.share_cluster_devices_in_session = false;
    }

    // Param is passed by value, moved
    pub fn set_share_cluster_devices_in_session(&mut self, v: bool) {
        self.share_cluster_devices_in_session = v;
    }

    // .tensorflow.ConfigProto.Experimental experimental = 16;


    pub fn get_experimental(&self) -> &ConfigProto_Experimental {
        self.experimental.as_ref().unwrap_or_else(|| ConfigProto_Experimental::default_instance())
    }
    pub fn clear_experimental(&mut self) {
        self.experimental.clear();
    }

    pub fn has_experimental(&self) -> bool {
        self.experimental.is_some()
    }

    // Param is passed by value, moved
    pub fn set_experimental(&mut self, v: ConfigProto_Experimental) {
        self.experimental = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_experimental(&mut self) -> &mut ConfigProto_Experimental {
        if self.experimental.is_none() {
            self.experimental.set_default();
        }
        self.experimental.as_mut().unwrap()
    }

    // Take field
    pub fn take_experimental(&mut self) -> ConfigProto_Experimental {
        self.experimental.take().unwrap_or_else(|| ConfigProto_Experimental::new())
    }
}
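
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the generated code): assembles a
// ConfigProto the way a client might before sending it to a session or model
// server. All field values are arbitrary; `example_config_proto` is a made-up
// name.
#[allow(dead_code)]
fn example_config_proto() -> ConfigProto {
    let mut config = ConfigProto::new();
    config.set_intra_op_parallelism_threads(2);
    config.set_inter_op_parallelism_threads(4);
    config.set_allow_soft_placement(true);
    // The map field is a plain HashMap on the generated struct.
    config.mut_device_count().insert("CPU".to_string(), 1);
    // Repeated message fields are protobuf::RepeatedField; push works directly.
    let mut pool = ThreadPoolOptionProto::new();
    pool.set_num_threads(4);
    config.mut_session_inter_op_thread_pool().push(pool);
    // Nested singular messages are lazily created by the mut_* accessors.
    config.mut_gpu_options().set_allow_growth(true);
    config
        .mut_graph_options()
        .mut_optimizer_options()
        .set_opt_level(OptimizerOptions_Level::L1);
    config
}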

impl ::protobuf::Message for ConfigProto {
    fn is_initialized(&self) -> bool {
        for v in &self.session_inter_op_thread_pool {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.gpu_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.graph_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.rpc_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.cluster_def {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.experimental {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(wire_type, is, &mut self.device_count)?;
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.intra_op_parallelism_threads = tmp;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.inter_op_parallelism_threads = tmp;
                },
                9 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.use_per_session_threads = tmp;
                },
                12 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.session_inter_op_thread_pool)?;
                },
                3 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.placement_period = tmp;
                },
                4 => {
                    ::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.device_filters)?;
                },
                6 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.gpu_options)?;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.allow_soft_placement = tmp;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.log_device_placement = tmp;
                },
                10 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.graph_options)?;
                },
                11 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.operation_timeout_in_ms = tmp;
                },
                13 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.rpc_options)?;
                },
                14 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.cluster_def)?;
                },
                15 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.isolate_session_state = tmp;
                },
                17 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.share_cluster_devices_in_session = tmp;
                },
                16 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(1, &self.device_count);
        if self.intra_op_parallelism_threads != 0 {
            my_size += ::protobuf::rt::value_size(2, self.intra_op_parallelism_threads, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.inter_op_parallelism_threads != 0 {
            my_size += ::protobuf::rt::value_size(5, self.inter_op_parallelism_threads, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.use_per_session_threads != false {
            my_size += 2;
        }
        for value in &self.session_inter_op_thread_pool {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        if self.placement_period != 0 {
            my_size += ::protobuf::rt::value_size(3, self.placement_period, ::protobuf::wire_format::WireTypeVarint);
        }
        for value in &self.device_filters {
            my_size += ::protobuf::rt::string_size(4, &value);
        };
        if let Some(ref v) = self.gpu_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.allow_soft_placement != false {
            my_size += 2;
        }
        if self.log_device_placement != false {
            my_size += 2;
        }
        if let Some(ref v) = self.graph_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.operation_timeout_in_ms != 0 {
            my_size += ::protobuf::rt::value_size(11, self.operation_timeout_in_ms, ::protobuf::wire_format::WireTypeVarint);
        }
        if let Some(ref v) = self.rpc_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if let Some(ref v) = self.cluster_def.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.isolate_session_state != false {
            my_size += 2;
        }
        if self.share_cluster_devices_in_session != false {
            my_size += 3;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            let len = v.compute_size();
            my_size += 2 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(1, &self.device_count, os)?;
        if self.intra_op_parallelism_threads != 0 {
            os.write_int32(2, self.intra_op_parallelism_threads)?;
        }
        if self.inter_op_parallelism_threads != 0 {
            os.write_int32(5, self.inter_op_parallelism_threads)?;
        }
        if self.use_per_session_threads != false {
            os.write_bool(9, self.use_per_session_threads)?;
        }
        for v in &self.session_inter_op_thread_pool {
            os.write_tag(12, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        if self.placement_period != 0 {
            os.write_int32(3, self.placement_period)?;
        }
        for v in &self.device_filters {
            os.write_string(4, &v)?;
        };
        if let Some(ref v) = self.gpu_options.as_ref() {
            os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.allow_soft_placement != false {
            os.write_bool(7, self.allow_soft_placement)?;
        }
        if self.log_device_placement != false {
            os.write_bool(8, self.log_device_placement)?;
        }
        if let Some(ref v) = self.graph_options.as_ref() {
            os.write_tag(10, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.operation_timeout_in_ms != 0 {
            os.write_int64(11, self.operation_timeout_in_ms)?;
        }
        if let Some(ref v) = self.rpc_options.as_ref() {
            os.write_tag(13, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if let Some(ref v) = self.cluster_def.as_ref() {
            os.write_tag(14, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.isolate_session_state != false {
            os.write_bool(15, self.isolate_session_state)?;
        }
        if self.share_cluster_devices_in_session != false {
            os.write_bool(17, self.share_cluster_devices_in_session)?;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            os.write_tag(16, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> ConfigProto {
        ConfigProto::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeInt32>(
                    "device_count",
                    |m: &ConfigProto| { &m.device_count },
                    |m: &mut ConfigProto| { &mut m.device_count },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "intra_op_parallelism_threads",
                    |m: &ConfigProto| { &m.intra_op_parallelism_threads },
                    |m: &mut ConfigProto| { &mut m.intra_op_parallelism_threads },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "inter_op_parallelism_threads",
                    |m: &ConfigProto| { &m.inter_op_parallelism_threads },
                    |m: &mut ConfigProto| { &mut m.inter_op_parallelism_threads },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "use_per_session_threads",
                    |m: &ConfigProto| { &m.use_per_session_threads },
                    |m: &mut ConfigProto| { &mut m.use_per_session_threads },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<ThreadPoolOptionProto>>(
                    "session_inter_op_thread_pool",
                    |m: &ConfigProto| { &m.session_inter_op_thread_pool },
                    |m: &mut ConfigProto| { &mut m.session_inter_op_thread_pool },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "placement_period",
                    |m: &ConfigProto| { &m.placement_period },
                    |m: &mut ConfigProto| { &mut m.placement_period },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "device_filters",
                    |m: &ConfigProto| { &m.device_filters },
                    |m: &mut ConfigProto| { &mut m.device_filters },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GPUOptions>>(
                    "gpu_options",
                    |m: &ConfigProto| { &m.gpu_options },
                    |m: &mut ConfigProto| { &mut m.gpu_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "allow_soft_placement",
                    |m: &ConfigProto| { &m.allow_soft_placement },
                    |m: &mut ConfigProto| { &mut m.allow_soft_placement },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "log_device_placement",
                    |m: &ConfigProto| { &m.log_device_placement },
                    |m: &mut ConfigProto| { &mut m.log_device_placement },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<GraphOptions>>(
                    "graph_options",
                    |m: &ConfigProto| { &m.graph_options },
                    |m: &mut ConfigProto| { &mut m.graph_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "operation_timeout_in_ms",
                    |m: &ConfigProto| { &m.operation_timeout_in_ms },
                    |m: &mut ConfigProto| { &mut m.operation_timeout_in_ms },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RPCOptions>>(
                    "rpc_options",
                    |m: &ConfigProto| { &m.rpc_options },
                    |m: &mut ConfigProto| { &mut m.rpc_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::cluster::ClusterDef>>(
                    "cluster_def",
                    |m: &ConfigProto| { &m.cluster_def },
                    |m: &mut ConfigProto| { &mut m.cluster_def },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "isolate_session_state",
                    |m: &ConfigProto| { &m.isolate_session_state },
                    |m: &mut ConfigProto| { &mut m.isolate_session_state },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "share_cluster_devices_in_session",
                    |m: &ConfigProto| { &m.share_cluster_devices_in_session },
                    |m: &mut ConfigProto| { &mut m.share_cluster_devices_in_session },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<ConfigProto_Experimental>>(
                    "experimental",
                    |m: &ConfigProto| { &m.experimental },
                    |m: &mut ConfigProto| { &mut m.experimental },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<ConfigProto>(
                    "ConfigProto",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static ConfigProto {
        static mut instance: ::protobuf::lazy::Lazy<ConfigProto> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(ConfigProto::new)
        }
    }
}
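
// --- Usage sketch (added for documentation; not part of the generated output) ---
// The ::protobuf::Message impl above provides wire-format (de)serialization.
// A hypothetical round trip through bytes might look like the following; error
// handling is collapsed into ProtobufResult for brevity.
#[allow(dead_code)]
fn example_config_round_trip() -> ::protobuf::ProtobufResult<ConfigProto> {
    let mut config = ConfigProto::new();
    config.set_log_device_placement(true);

    // write_to_bytes() computes field sizes and then serializes with the cached sizes.
    let bytes: ::std::vec::Vec<u8> = config.write_to_bytes()?;

    // parse_from_bytes() drives merge_from() on a freshly constructed message.
    let decoded: ConfigProto = ::protobuf::parse_from_bytes(&bytes)?;
    assert_eq!(config, decoded);
    Ok(decoded)
}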

impl ::protobuf::Clear for ConfigProto {
    fn clear(&mut self) {
        self.device_count.clear();
        self.intra_op_parallelism_threads = 0;
        self.inter_op_parallelism_threads = 0;
        self.use_per_session_threads = false;
        self.session_inter_op_thread_pool.clear();
        self.placement_period = 0;
        self.device_filters.clear();
        self.gpu_options.clear();
        self.allow_soft_placement = false;
        self.log_device_placement = false;
        self.graph_options.clear();
        self.operation_timeout_in_ms = 0;
        self.rpc_options.clear();
        self.cluster_def.clear();
        self.isolate_session_state = false;
        self.share_cluster_devices_in_session = false;
        self.experimental.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for ConfigProto {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for ConfigProto {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
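
// --- Usage sketch (added for documentation; not part of the generated output) ---
// Illustrates the Clear and Debug impls above: clear() resets every field to its
// proto3 default so the allocation can be reused, and Debug renders the message
// through protobuf's text format rather than a derived struct dump.
#[allow(dead_code)]
fn example_clear_and_debug() {
    let mut config = ConfigProto::new();
    config.set_operation_timeout_in_ms(30_000);

    // Debug delegates to ::protobuf::text_format, so set fields appear as
    // readable `field_name: value` entries.
    let rendered = format!("{:?}", config);
    assert!(rendered.contains("operation_timeout_in_ms"));

    ::protobuf::Clear::clear(&mut config);
    assert_eq!(config, ConfigProto::new());
}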

#[derive(PartialEq,Clone,Default)]
pub struct ConfigProto_Experimental {
    // message fields
    pub collective_group_leader: ::std::string::String,
    pub executor_type: ::std::string::String,
    pub recv_buf_max_chunk: i32,
    pub use_numa_affinity: bool,
    pub collective_deterministic_sequential_execution: bool,
    pub collective_nccl: bool,
    pub share_session_state_in_clusterspec_propagation: bool,
    pub disable_thread_spinning: bool,
    pub share_cluster_devices_in_session: bool,
    pub session_metadata: ::protobuf::SingularPtrField<SessionMetadata>,
    pub optimize_for_static_graph: bool,
    pub enable_mlir_bridge: bool,
    pub enable_mlir_graph_optimization: bool,
    pub disable_output_partition_graphs: bool,
    pub xla_fusion_autotuner_thresh: i64,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a ConfigProto_Experimental {
    fn default() -> &'a ConfigProto_Experimental {
        <ConfigProto_Experimental as ::protobuf::Message>::default_instance()
    }
}

impl ConfigProto_Experimental {
    pub fn new() -> ConfigProto_Experimental {
        ::std::default::Default::default()
    }

    // string collective_group_leader = 1;


    pub fn get_collective_group_leader(&self) -> &str {
        &self.collective_group_leader
    }
    pub fn clear_collective_group_leader(&mut self) {
        self.collective_group_leader.clear();
    }

    // Param is passed by value, moved
    pub fn set_collective_group_leader(&mut self, v: ::std::string::String) {
        self.collective_group_leader = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_collective_group_leader(&mut self) -> &mut ::std::string::String {
        &mut self.collective_group_leader
    }

    // Take field
    pub fn take_collective_group_leader(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.collective_group_leader, ::std::string::String::new())
    }

    // string executor_type = 3;


    pub fn get_executor_type(&self) -> &str {
        &self.executor_type
    }
    pub fn clear_executor_type(&mut self) {
        self.executor_type.clear();
    }

    // Param is passed by value, moved
    pub fn set_executor_type(&mut self, v: ::std::string::String) {
        self.executor_type = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_executor_type(&mut self) -> &mut ::std::string::String {
        &mut self.executor_type
    }

    // Take field
    pub fn take_executor_type(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.executor_type, ::std::string::String::new())
    }

    // int32 recv_buf_max_chunk = 4;


    pub fn get_recv_buf_max_chunk(&self) -> i32 {
        self.recv_buf_max_chunk
    }
    pub fn clear_recv_buf_max_chunk(&mut self) {
        self.recv_buf_max_chunk = 0;
    }

    // Param is passed by value, moved
    pub fn set_recv_buf_max_chunk(&mut self, v: i32) {
        self.recv_buf_max_chunk = v;
    }

    // bool use_numa_affinity = 5;


    pub fn get_use_numa_affinity(&self) -> bool {
        self.use_numa_affinity
    }
    pub fn clear_use_numa_affinity(&mut self) {
        self.use_numa_affinity = false;
    }

    // Param is passed by value, moved
    pub fn set_use_numa_affinity(&mut self, v: bool) {
        self.use_numa_affinity = v;
    }

    // bool collective_deterministic_sequential_execution = 6;


    pub fn get_collective_deterministic_sequential_execution(&self) -> bool {
        self.collective_deterministic_sequential_execution
    }
    pub fn clear_collective_deterministic_sequential_execution(&mut self) {
        self.collective_deterministic_sequential_execution = false;
    }

    // Param is passed by value, moved
    pub fn set_collective_deterministic_sequential_execution(&mut self, v: bool) {
        self.collective_deterministic_sequential_execution = v;
    }

    // bool collective_nccl = 7;


    pub fn get_collective_nccl(&self) -> bool {
        self.collective_nccl
    }
    pub fn clear_collective_nccl(&mut self) {
        self.collective_nccl = false;
    }

    // Param is passed by value, moved
    pub fn set_collective_nccl(&mut self, v: bool) {
        self.collective_nccl = v;
    }

    // bool share_session_state_in_clusterspec_propagation = 8;


    pub fn get_share_session_state_in_clusterspec_propagation(&self) -> bool {
        self.share_session_state_in_clusterspec_propagation
    }
    pub fn clear_share_session_state_in_clusterspec_propagation(&mut self) {
        self.share_session_state_in_clusterspec_propagation = false;
    }

    // Param is passed by value, moved
    pub fn set_share_session_state_in_clusterspec_propagation(&mut self, v: bool) {
        self.share_session_state_in_clusterspec_propagation = v;
    }

    // bool disable_thread_spinning = 9;


    pub fn get_disable_thread_spinning(&self) -> bool {
        self.disable_thread_spinning
    }
    pub fn clear_disable_thread_spinning(&mut self) {
        self.disable_thread_spinning = false;
    }

    // Param is passed by value, moved
    pub fn set_disable_thread_spinning(&mut self, v: bool) {
        self.disable_thread_spinning = v;
    }

    // bool share_cluster_devices_in_session = 10;


    pub fn get_share_cluster_devices_in_session(&self) -> bool {
        self.share_cluster_devices_in_session
    }
    pub fn clear_share_cluster_devices_in_session(&mut self) {
        self.share_cluster_devices_in_session = false;
    }

    // Param is passed by value, moved
    pub fn set_share_cluster_devices_in_session(&mut self, v: bool) {
        self.share_cluster_devices_in_session = v;
    }

    // .tensorflow.SessionMetadata session_metadata = 11;


    pub fn get_session_metadata(&self) -> &SessionMetadata {
        self.session_metadata.as_ref().unwrap_or_else(|| SessionMetadata::default_instance())
    }
    pub fn clear_session_metadata(&mut self) {
        self.session_metadata.clear();
    }

    pub fn has_session_metadata(&self) -> bool {
        self.session_metadata.is_some()
    }

    // Param is passed by value, moved
    pub fn set_session_metadata(&mut self, v: SessionMetadata) {
        self.session_metadata = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_session_metadata(&mut self) -> &mut SessionMetadata {
        if self.session_metadata.is_none() {
            self.session_metadata.set_default();
        }
        self.session_metadata.as_mut().unwrap()
    }

    // Take field
    pub fn take_session_metadata(&mut self) -> SessionMetadata {
        self.session_metadata.take().unwrap_or_else(|| SessionMetadata::new())
    }

    // bool optimize_for_static_graph = 12;


    pub fn get_optimize_for_static_graph(&self) -> bool {
        self.optimize_for_static_graph
    }
    pub fn clear_optimize_for_static_graph(&mut self) {
        self.optimize_for_static_graph = false;
    }

    // Param is passed by value, moved
    pub fn set_optimize_for_static_graph(&mut self, v: bool) {
        self.optimize_for_static_graph = v;
    }

    // bool enable_mlir_bridge = 13;


    pub fn get_enable_mlir_bridge(&self) -> bool {
        self.enable_mlir_bridge
    }
    pub fn clear_enable_mlir_bridge(&mut self) {
        self.enable_mlir_bridge = false;
    }

    // Param is passed by value, moved
    pub fn set_enable_mlir_bridge(&mut self, v: bool) {
        self.enable_mlir_bridge = v;
    }

    // bool enable_mlir_graph_optimization = 16;


    pub fn get_enable_mlir_graph_optimization(&self) -> bool {
        self.enable_mlir_graph_optimization
    }
    pub fn clear_enable_mlir_graph_optimization(&mut self) {
        self.enable_mlir_graph_optimization = false;
    }

    // Param is passed by value, moved
    pub fn set_enable_mlir_graph_optimization(&mut self, v: bool) {
        self.enable_mlir_graph_optimization = v;
    }

    // bool disable_output_partition_graphs = 14;


    pub fn get_disable_output_partition_graphs(&self) -> bool {
        self.disable_output_partition_graphs
    }
    pub fn clear_disable_output_partition_graphs(&mut self) {
        self.disable_output_partition_graphs = false;
    }

    // Param is passed by value, moved
    pub fn set_disable_output_partition_graphs(&mut self, v: bool) {
        self.disable_output_partition_graphs = v;
    }

    // int64 xla_fusion_autotuner_thresh = 15;


    pub fn get_xla_fusion_autotuner_thresh(&self) -> i64 {
        self.xla_fusion_autotuner_thresh
    }
    pub fn clear_xla_fusion_autotuner_thresh(&mut self) {
        self.xla_fusion_autotuner_thresh = 0;
    }

    // Param is passed by value, moved
    pub fn set_xla_fusion_autotuner_thresh(&mut self, v: i64) {
        self.xla_fusion_autotuner_thresh = v;
    }
}
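
// --- Usage sketch (added for documentation; not part of the generated output) ---
// Shows how the ConfigProto_Experimental accessors above are typically reached:
// through ConfigProto::mut_experimental(). The flag and string values here are
// arbitrary illustrations of the generated API, not tuning advice.
#[allow(dead_code)]
fn example_experimental_options() -> ConfigProto {
    let mut config = ConfigProto::new();
    {
        let exp: &mut ConfigProto_Experimental = config.mut_experimental();
        exp.set_executor_type("DEFAULT".to_string());
        exp.set_disable_thread_spinning(true);
        exp.set_xla_fusion_autotuner_thresh(10);
    }
    assert_eq!(config.get_experimental().get_executor_type(), "DEFAULT");
    config
}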

impl ::protobuf::Message for ConfigProto_Experimental {
    fn is_initialized(&self) -> bool {
        for v in &self.session_metadata {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.collective_group_leader)?;
                },
                3 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.executor_type)?;
                },
                4 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.recv_buf_max_chunk = tmp;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.use_numa_affinity = tmp;
                },
                6 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.collective_deterministic_sequential_execution = tmp;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.collective_nccl = tmp;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.share_session_state_in_clusterspec_propagation = tmp;
                },
                9 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.disable_thread_spinning = tmp;
                },
                10 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.share_cluster_devices_in_session = tmp;
                },
                11 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.session_metadata)?;
                },
                12 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.optimize_for_static_graph = tmp;
                },
                13 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.enable_mlir_bridge = tmp;
                },
                16 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.enable_mlir_graph_optimization = tmp;
                },
                14 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.disable_output_partition_graphs = tmp;
                },
                15 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.xla_fusion_autotuner_thresh = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if !self.collective_group_leader.is_empty() {
            my_size += ::protobuf::rt::string_size(1, &self.collective_group_leader);
        }
        if !self.executor_type.is_empty() {
            my_size += ::protobuf::rt::string_size(3, &self.executor_type);
        }
        if self.recv_buf_max_chunk != 0 {
            my_size += ::protobuf::rt::value_size(4, self.recv_buf_max_chunk, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.use_numa_affinity != false {
            my_size += 2;
        }
        if self.collective_deterministic_sequential_execution != false {
            my_size += 2;
        }
        if self.collective_nccl != false {
            my_size += 2;
        }
        if self.share_session_state_in_clusterspec_propagation != false {
            my_size += 2;
        }
        if self.disable_thread_spinning != false {
            my_size += 2;
        }
        if self.share_cluster_devices_in_session != false {
            my_size += 2;
        }
        if let Some(ref v) = self.session_metadata.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.optimize_for_static_graph != false {
            my_size += 2;
        }
        if self.enable_mlir_bridge != false {
            my_size += 2;
        }
        if self.enable_mlir_graph_optimization != false {
            my_size += 3;
        }
        if self.disable_output_partition_graphs != false {
            my_size += 2;
        }
        if self.xla_fusion_autotuner_thresh != 0 {
            my_size += ::protobuf::rt::value_size(15, self.xla_fusion_autotuner_thresh, ::protobuf::wire_format::WireTypeVarint);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if !self.collective_group_leader.is_empty() {
            os.write_string(1, &self.collective_group_leader)?;
        }
        if !self.executor_type.is_empty() {
            os.write_string(3, &self.executor_type)?;
        }
        if self.recv_buf_max_chunk != 0 {
            os.write_int32(4, self.recv_buf_max_chunk)?;
        }
        if self.use_numa_affinity != false {
            os.write_bool(5, self.use_numa_affinity)?;
        }
        if self.collective_deterministic_sequential_execution != false {
            os.write_bool(6, self.collective_deterministic_sequential_execution)?;
        }
        if self.collective_nccl != false {
            os.write_bool(7, self.collective_nccl)?;
        }
        if self.share_session_state_in_clusterspec_propagation != false {
            os.write_bool(8, self.share_session_state_in_clusterspec_propagation)?;
        }
        if self.disable_thread_spinning != false {
            os.write_bool(9, self.disable_thread_spinning)?;
        }
        if self.share_cluster_devices_in_session != false {
            os.write_bool(10, self.share_cluster_devices_in_session)?;
        }
        if let Some(ref v) = self.session_metadata.as_ref() {
            os.write_tag(11, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.optimize_for_static_graph != false {
            os.write_bool(12, self.optimize_for_static_graph)?;
        }
        if self.enable_mlir_bridge != false {
            os.write_bool(13, self.enable_mlir_bridge)?;
        }
        if self.enable_mlir_graph_optimization != false {
            os.write_bool(16, self.enable_mlir_graph_optimization)?;
        }
        if self.disable_output_partition_graphs != false {
            os.write_bool(14, self.disable_output_partition_graphs)?;
        }
        if self.xla_fusion_autotuner_thresh != 0 {
            os.write_int64(15, self.xla_fusion_autotuner_thresh)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> ConfigProto_Experimental {
        ConfigProto_Experimental::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "collective_group_leader",
                    |m: &ConfigProto_Experimental| { &m.collective_group_leader },
                    |m: &mut ConfigProto_Experimental| { &mut m.collective_group_leader },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "executor_type",
                    |m: &ConfigProto_Experimental| { &m.executor_type },
                    |m: &mut ConfigProto_Experimental| { &mut m.executor_type },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "recv_buf_max_chunk",
                    |m: &ConfigProto_Experimental| { &m.recv_buf_max_chunk },
                    |m: &mut ConfigProto_Experimental| { &mut m.recv_buf_max_chunk },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "use_numa_affinity",
                    |m: &ConfigProto_Experimental| { &m.use_numa_affinity },
                    |m: &mut ConfigProto_Experimental| { &mut m.use_numa_affinity },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "collective_deterministic_sequential_execution",
                    |m: &ConfigProto_Experimental| { &m.collective_deterministic_sequential_execution },
                    |m: &mut ConfigProto_Experimental| { &mut m.collective_deterministic_sequential_execution },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "collective_nccl",
                    |m: &ConfigProto_Experimental| { &m.collective_nccl },
                    |m: &mut ConfigProto_Experimental| { &mut m.collective_nccl },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "share_session_state_in_clusterspec_propagation",
                    |m: &ConfigProto_Experimental| { &m.share_session_state_in_clusterspec_propagation },
                    |m: &mut ConfigProto_Experimental| { &mut m.share_session_state_in_clusterspec_propagation },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "disable_thread_spinning",
                    |m: &ConfigProto_Experimental| { &m.disable_thread_spinning },
                    |m: &mut ConfigProto_Experimental| { &mut m.disable_thread_spinning },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "share_cluster_devices_in_session",
                    |m: &ConfigProto_Experimental| { &m.share_cluster_devices_in_session },
                    |m: &mut ConfigProto_Experimental| { &mut m.share_cluster_devices_in_session },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<SessionMetadata>>(
                    "session_metadata",
                    |m: &ConfigProto_Experimental| { &m.session_metadata },
                    |m: &mut ConfigProto_Experimental| { &mut m.session_metadata },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "optimize_for_static_graph",
                    |m: &ConfigProto_Experimental| { &m.optimize_for_static_graph },
                    |m: &mut ConfigProto_Experimental| { &mut m.optimize_for_static_graph },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "enable_mlir_bridge",
                    |m: &ConfigProto_Experimental| { &m.enable_mlir_bridge },
                    |m: &mut ConfigProto_Experimental| { &mut m.enable_mlir_bridge },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "enable_mlir_graph_optimization",
                    |m: &ConfigProto_Experimental| { &m.enable_mlir_graph_optimization },
                    |m: &mut ConfigProto_Experimental| { &mut m.enable_mlir_graph_optimization },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "disable_output_partition_graphs",
                    |m: &ConfigProto_Experimental| { &m.disable_output_partition_graphs },
                    |m: &mut ConfigProto_Experimental| { &mut m.disable_output_partition_graphs },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "xla_fusion_autotuner_thresh",
                    |m: &ConfigProto_Experimental| { &m.xla_fusion_autotuner_thresh },
                    |m: &mut ConfigProto_Experimental| { &mut m.xla_fusion_autotuner_thresh },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<ConfigProto_Experimental>(
                    "ConfigProto.Experimental",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static ConfigProto_Experimental {
        static mut instance: ::protobuf::lazy::Lazy<ConfigProto_Experimental> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(ConfigProto_Experimental::new)
        }
    }
}

impl ::protobuf::Clear for ConfigProto_Experimental {
    fn clear(&mut self) {
        self.collective_group_leader.clear();
        self.executor_type.clear();
        self.recv_buf_max_chunk = 0;
        self.use_numa_affinity = false;
        self.collective_deterministic_sequential_execution = false;
        self.collective_nccl = false;
        self.share_session_state_in_clusterspec_propagation = false;
        self.disable_thread_spinning = false;
        self.share_cluster_devices_in_session = false;
        self.session_metadata.clear();
        self.optimize_for_static_graph = false;
        self.enable_mlir_bridge = false;
        self.enable_mlir_graph_optimization = false;
        self.disable_output_partition_graphs = false;
        self.xla_fusion_autotuner_thresh = 0;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for ConfigProto_Experimental {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for ConfigProto_Experimental {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}

#[derive(PartialEq,Clone,Default)]
pub struct RunOptions {
    // message fields
    pub trace_level: RunOptions_TraceLevel,
    pub timeout_in_ms: i64,
    pub inter_op_thread_pool: i32,
    pub output_partition_graphs: bool,
    pub debug_options: ::protobuf::SingularPtrField<super::debug::DebugOptions>,
    pub report_tensor_allocations_upon_oom: bool,
    pub experimental: ::protobuf::SingularPtrField<RunOptions_Experimental>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RunOptions {
    fn default() -> &'a RunOptions {
        <RunOptions as ::protobuf::Message>::default_instance()
    }
}

impl RunOptions {
    pub fn new() -> RunOptions {
        ::std::default::Default::default()
    }

    // .tensorflow.RunOptions.TraceLevel trace_level = 1;


    pub fn get_trace_level(&self) -> RunOptions_TraceLevel {
        self.trace_level
    }
    pub fn clear_trace_level(&mut self) {
        self.trace_level = RunOptions_TraceLevel::NO_TRACE;
    }

    // Param is passed by value, moved
    pub fn set_trace_level(&mut self, v: RunOptions_TraceLevel) {
        self.trace_level = v;
    }

    // int64 timeout_in_ms = 2;


    pub fn get_timeout_in_ms(&self) -> i64 {
        self.timeout_in_ms
    }
    pub fn clear_timeout_in_ms(&mut self) {
        self.timeout_in_ms = 0;
    }

    // Param is passed by value, moved
    pub fn set_timeout_in_ms(&mut self, v: i64) {
        self.timeout_in_ms = v;
    }

    // int32 inter_op_thread_pool = 3;


    pub fn get_inter_op_thread_pool(&self) -> i32 {
        self.inter_op_thread_pool
    }
    pub fn clear_inter_op_thread_pool(&mut self) {
        self.inter_op_thread_pool = 0;
    }

    // Param is passed by value, moved
    pub fn set_inter_op_thread_pool(&mut self, v: i32) {
        self.inter_op_thread_pool = v;
    }

    // bool output_partition_graphs = 5;


    pub fn get_output_partition_graphs(&self) -> bool {
        self.output_partition_graphs
    }
    pub fn clear_output_partition_graphs(&mut self) {
        self.output_partition_graphs = false;
    }

    // Param is passed by value, moved
    pub fn set_output_partition_graphs(&mut self, v: bool) {
        self.output_partition_graphs = v;
    }

    // .tensorflow.DebugOptions debug_options = 6;


    pub fn get_debug_options(&self) -> &super::debug::DebugOptions {
        self.debug_options.as_ref().unwrap_or_else(|| super::debug::DebugOptions::default_instance())
    }
    pub fn clear_debug_options(&mut self) {
        self.debug_options.clear();
    }

    pub fn has_debug_options(&self) -> bool {
        self.debug_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_debug_options(&mut self, v: super::debug::DebugOptions) {
        self.debug_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_debug_options(&mut self) -> &mut super::debug::DebugOptions {
        if self.debug_options.is_none() {
            self.debug_options.set_default();
        }
        self.debug_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_debug_options(&mut self) -> super::debug::DebugOptions {
        self.debug_options.take().unwrap_or_else(|| super::debug::DebugOptions::new())
    }

    // bool report_tensor_allocations_upon_oom = 7;


    pub fn get_report_tensor_allocations_upon_oom(&self) -> bool {
        self.report_tensor_allocations_upon_oom
    }
    pub fn clear_report_tensor_allocations_upon_oom(&mut self) {
        self.report_tensor_allocations_upon_oom = false;
    }

    // Param is passed by value, moved
    pub fn set_report_tensor_allocations_upon_oom(&mut self, v: bool) {
        self.report_tensor_allocations_upon_oom = v;
    }

    // .tensorflow.RunOptions.Experimental experimental = 8;


    pub fn get_experimental(&self) -> &RunOptions_Experimental {
        self.experimental.as_ref().unwrap_or_else(|| RunOptions_Experimental::default_instance())
    }
    pub fn clear_experimental(&mut self) {
        self.experimental.clear();
    }

    pub fn has_experimental(&self) -> bool {
        self.experimental.is_some()
    }

    // Param is passed by value, moved
    pub fn set_experimental(&mut self, v: RunOptions_Experimental) {
        self.experimental = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_experimental(&mut self) -> &mut RunOptions_Experimental {
        if self.experimental.is_none() {
            self.experimental.set_default();
        }
        self.experimental.as_mut().unwrap()
    }

    // Take field
    pub fn take_experimental(&mut self) -> RunOptions_Experimental {
        self.experimental.take().unwrap_or_else(|| RunOptions_Experimental::new())
    }
}
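
// --- Usage sketch (added for documentation; not part of the generated output) ---
// A per-call RunOptions built with the accessors above. RunOptions_TraceLevel is
// the generated enum for .tensorflow.RunOptions.TraceLevel; FULL_TRACE is assumed
// to exist as in the upstream proto, and the timeout value is an arbitrary example.
#[allow(dead_code)]
fn example_run_options() -> RunOptions {
    let mut opts = RunOptions::new();
    opts.set_trace_level(RunOptions_TraceLevel::FULL_TRACE);
    opts.set_timeout_in_ms(10_000);
    opts.set_report_tensor_allocations_upon_oom(true);
    opts
}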

impl ::protobuf::Message for RunOptions {
    fn is_initialized(&self) -> bool {
        for v in &self.debug_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.experimental {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.trace_level, 1, &mut self.unknown_fields)?
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.timeout_in_ms = tmp;
                },
                3 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int32()?;
                    self.inter_op_thread_pool = tmp;
                },
                5 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.output_partition_graphs = tmp;
                },
                6 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.debug_options)?;
                },
                7 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.report_tensor_allocations_upon_oom = tmp;
                },
                8 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.experimental)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.trace_level != RunOptions_TraceLevel::NO_TRACE {
            my_size += ::protobuf::rt::enum_size(1, self.trace_level);
        }
        if self.timeout_in_ms != 0 {
            my_size += ::protobuf::rt::value_size(2, self.timeout_in_ms, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.inter_op_thread_pool != 0 {
            my_size += ::protobuf::rt::value_size(3, self.inter_op_thread_pool, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.output_partition_graphs != false {
            my_size += 2;
        }
        if let Some(ref v) = self.debug_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if self.report_tensor_allocations_upon_oom != false {
            my_size += 2;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.trace_level != RunOptions_TraceLevel::NO_TRACE {
            os.write_enum(1, self.trace_level.value())?;
        }
        if self.timeout_in_ms != 0 {
            os.write_int64(2, self.timeout_in_ms)?;
        }
        if self.inter_op_thread_pool != 0 {
            os.write_int32(3, self.inter_op_thread_pool)?;
        }
        if self.output_partition_graphs != false {
            os.write_bool(5, self.output_partition_graphs)?;
        }
        if let Some(ref v) = self.debug_options.as_ref() {
            os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if self.report_tensor_allocations_upon_oom != false {
            os.write_bool(7, self.report_tensor_allocations_upon_oom)?;
        }
        if let Some(ref v) = self.experimental.as_ref() {
            os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RunOptions {
        RunOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<RunOptions_TraceLevel>>(
                    "trace_level",
                    |m: &RunOptions| { &m.trace_level },
                    |m: &mut RunOptions| { &mut m.trace_level },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "timeout_in_ms",
                    |m: &RunOptions| { &m.timeout_in_ms },
                    |m: &mut RunOptions| { &mut m.timeout_in_ms },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt32>(
                    "inter_op_thread_pool",
                    |m: &RunOptions| { &m.inter_op_thread_pool },
                    |m: &mut RunOptions| { &mut m.inter_op_thread_pool },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "output_partition_graphs",
                    |m: &RunOptions| { &m.output_partition_graphs },
                    |m: &mut RunOptions| { &mut m.output_partition_graphs },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::debug::DebugOptions>>(
                    "debug_options",
                    |m: &RunOptions| { &m.debug_options },
                    |m: &mut RunOptions| { &mut m.debug_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "report_tensor_allocations_upon_oom",
                    |m: &RunOptions| { &m.report_tensor_allocations_upon_oom },
                    |m: &mut RunOptions| { &mut m.report_tensor_allocations_upon_oom },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions_Experimental>>(
                    "experimental",
                    |m: &RunOptions| { &m.experimental },
                    |m: &mut RunOptions| { &mut m.experimental },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions>(
                    "RunOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RunOptions {
        static mut instance: ::protobuf::lazy::Lazy<RunOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RunOptions::new)
        }
    }
}

impl ::protobuf::Clear for RunOptions {
    fn clear(&mut self) {
        self.trace_level = RunOptions_TraceLevel::NO_TRACE;
        self.timeout_in_ms = 0;
        self.inter_op_thread_pool = 0;
        self.output_partition_graphs = false;
        self.debug_options.clear();
        self.report_tensor_allocations_upon_oom = false;
        self.experimental.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RunOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RunOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
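
// ---------------------------------------------------------------------------
// Illustrative usage sketch -- not part of the rust-protobuf output above.
// It relies only on what the generated code itself shows: message fields are
// plain `pub` members, and `RunOptions` implements `::protobuf::Message`, so
// the blanket `write_to_bytes` helper is available. The function name
// `example_build_run_options` is a hypothetical name for this sketch only.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn example_build_run_options() -> ::protobuf::ProtobufResult<::std::vec::Vec<u8>> {
    let mut opts = RunOptions::new();
    // Scalar and enum fields can be written directly...
    opts.trace_level = RunOptions_TraceLevel::FULL_TRACE;
    opts.timeout_in_ms = 30_000;
    // ...while singular message fields are wrapped in SingularPtrField.
    let mut exp = RunOptions_Experimental::new();
    exp.set_collective_graph_key(1);
    opts.experimental = ::protobuf::SingularPtrField::some(exp);
    // Serialize with the provided `Message::write_to_bytes` method.
    opts.write_to_bytes()
}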

#[derive(PartialEq,Clone,Default)]
pub struct RunOptions_Experimental {
    // message fields
    pub collective_graph_key: i64,
    pub use_run_handler_pool: bool,
    pub run_handler_pool_options: ::protobuf::SingularPtrField<RunOptions_Experimental_RunHandlerPoolOptions>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RunOptions_Experimental {
    fn default() -> &'a RunOptions_Experimental {
        <RunOptions_Experimental as ::protobuf::Message>::default_instance()
    }
}

impl RunOptions_Experimental {
    pub fn new() -> RunOptions_Experimental {
        ::std::default::Default::default()
    }

    // int64 collective_graph_key = 1;


    pub fn get_collective_graph_key(&self) -> i64 {
        self.collective_graph_key
    }
    pub fn clear_collective_graph_key(&mut self) {
        self.collective_graph_key = 0;
    }

    // Param is passed by value, moved
    pub fn set_collective_graph_key(&mut self, v: i64) {
        self.collective_graph_key = v;
    }

    // bool use_run_handler_pool = 2;


    pub fn get_use_run_handler_pool(&self) -> bool {
        self.use_run_handler_pool
    }
    pub fn clear_use_run_handler_pool(&mut self) {
        self.use_run_handler_pool = false;
    }

    // Param is passed by value, moved
    pub fn set_use_run_handler_pool(&mut self, v: bool) {
        self.use_run_handler_pool = v;
    }

    // .tensorflow.RunOptions.Experimental.RunHandlerPoolOptions run_handler_pool_options = 3;


    pub fn get_run_handler_pool_options(&self) -> &RunOptions_Experimental_RunHandlerPoolOptions {
        self.run_handler_pool_options.as_ref().unwrap_or_else(|| RunOptions_Experimental_RunHandlerPoolOptions::default_instance())
    }
    pub fn clear_run_handler_pool_options(&mut self) {
        self.run_handler_pool_options.clear();
    }

    pub fn has_run_handler_pool_options(&self) -> bool {
        self.run_handler_pool_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_run_handler_pool_options(&mut self, v: RunOptions_Experimental_RunHandlerPoolOptions) {
        self.run_handler_pool_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_run_handler_pool_options(&mut self) -> &mut RunOptions_Experimental_RunHandlerPoolOptions {
        if self.run_handler_pool_options.is_none() {
            self.run_handler_pool_options.set_default();
        }
        self.run_handler_pool_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_run_handler_pool_options(&mut self) -> RunOptions_Experimental_RunHandlerPoolOptions {
        self.run_handler_pool_options.take().unwrap_or_else(|| RunOptions_Experimental_RunHandlerPoolOptions::new())
    }
}

impl ::protobuf::Message for RunOptions_Experimental {
    fn is_initialized(&self) -> bool {
        for v in &self.run_handler_pool_options {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.collective_graph_key = tmp;
                },
                2 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.use_run_handler_pool = tmp;
                },
                3 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.run_handler_pool_options)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.collective_graph_key != 0 {
            my_size += ::protobuf::rt::value_size(1, self.collective_graph_key, ::protobuf::wire_format::WireTypeVarint);
        }
        if self.use_run_handler_pool != false {
            my_size += 2;
        }
        if let Some(ref v) = self.run_handler_pool_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.collective_graph_key != 0 {
            os.write_int64(1, self.collective_graph_key)?;
        }
        if self.use_run_handler_pool != false {
            os.write_bool(2, self.use_run_handler_pool)?;
        }
        if let Some(ref v) = self.run_handler_pool_options.as_ref() {
            os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RunOptions_Experimental {
        RunOptions_Experimental::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "collective_graph_key",
                    |m: &RunOptions_Experimental| { &m.collective_graph_key },
                    |m: &mut RunOptions_Experimental| { &mut m.collective_graph_key },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "use_run_handler_pool",
                    |m: &RunOptions_Experimental| { &m.use_run_handler_pool },
                    |m: &mut RunOptions_Experimental| { &mut m.use_run_handler_pool },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions_Experimental_RunHandlerPoolOptions>>(
                    "run_handler_pool_options",
                    |m: &RunOptions_Experimental| { &m.run_handler_pool_options },
                    |m: &mut RunOptions_Experimental| { &mut m.run_handler_pool_options },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions_Experimental>(
                    "RunOptions.Experimental",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RunOptions_Experimental {
        static mut instance: ::protobuf::lazy::Lazy<RunOptions_Experimental> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RunOptions_Experimental::new)
        }
    }
}

impl ::protobuf::Clear for RunOptions_Experimental {
    fn clear(&mut self) {
        self.collective_graph_key = 0;
        self.use_run_handler_pool = false;
        self.run_handler_pool_options.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RunOptions_Experimental {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RunOptions_Experimental {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
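
// Illustrative sketch (not generated): populating the nested experimental
// options through the accessors defined above. `mut_run_handler_pool_options`
// lazily initializes the singular message field before returning a mutable
// reference, so no explicit `set_` call is needed for it. The function name
// `example_experimental_options` is hypothetical.
#[allow(dead_code)]
fn example_experimental_options() -> RunOptions_Experimental {
    let mut exp = RunOptions_Experimental::new();
    exp.set_collective_graph_key(42);
    exp.set_use_run_handler_pool(true);
    exp.mut_run_handler_pool_options().set_priority(1);
    exp
}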

#[derive(PartialEq,Clone,Default)]
pub struct RunOptions_Experimental_RunHandlerPoolOptions {
    // message fields
    pub priority: i64,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RunOptions_Experimental_RunHandlerPoolOptions {
    fn default() -> &'a RunOptions_Experimental_RunHandlerPoolOptions {
        <RunOptions_Experimental_RunHandlerPoolOptions as ::protobuf::Message>::default_instance()
    }
}

impl RunOptions_Experimental_RunHandlerPoolOptions {
    pub fn new() -> RunOptions_Experimental_RunHandlerPoolOptions {
        ::std::default::Default::default()
    }

    // int64 priority = 1;


    pub fn get_priority(&self) -> i64 {
        self.priority
    }
    pub fn clear_priority(&mut self) {
        self.priority = 0;
    }

    // Param is passed by value, moved
    pub fn set_priority(&mut self, v: i64) {
        self.priority = v;
    }
}

impl ::protobuf::Message for RunOptions_Experimental_RunHandlerPoolOptions {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_int64()?;
                    self.priority = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if self.priority != 0 {
            my_size += ::protobuf::rt::value_size(1, self.priority, ::protobuf::wire_format::WireTypeVarint);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if self.priority != 0 {
            os.write_int64(1, self.priority)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RunOptions_Experimental_RunHandlerPoolOptions {
        RunOptions_Experimental_RunHandlerPoolOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
                    "priority",
                    |m: &RunOptions_Experimental_RunHandlerPoolOptions| { &m.priority },
                    |m: &mut RunOptions_Experimental_RunHandlerPoolOptions| { &mut m.priority },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RunOptions_Experimental_RunHandlerPoolOptions>(
                    "RunOptions.Experimental.RunHandlerPoolOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RunOptions_Experimental_RunHandlerPoolOptions {
        static mut instance: ::protobuf::lazy::Lazy<RunOptions_Experimental_RunHandlerPoolOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RunOptions_Experimental_RunHandlerPoolOptions::new)
        }
    }
}

impl ::protobuf::Clear for RunOptions_Experimental_RunHandlerPoolOptions {
    fn clear(&mut self) {
        self.priority = 0;
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RunOptions_Experimental_RunHandlerPoolOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RunOptions_Experimental_RunHandlerPoolOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
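
// Illustrative sketch (not generated), assuming the provided `Message` trait
// helpers `write_to_bytes` and `merge_from_bytes` from the protobuf 2.x
// runtime: a serialize/parse round trip for the smallest message in this
// file. The function name `example_pool_options_roundtrip` is hypothetical.
#[allow(dead_code)]
fn example_pool_options_roundtrip() -> ::protobuf::ProtobufResult<()> {
    let mut opts = RunOptions_Experimental_RunHandlerPoolOptions::new();
    opts.set_priority(7);
    let bytes = opts.write_to_bytes()?;
    let mut decoded = RunOptions_Experimental_RunHandlerPoolOptions::new();
    decoded.merge_from_bytes(&bytes)?;
    assert_eq!(decoded.get_priority(), 7);
    ::std::result::Result::Ok(())
}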

#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum RunOptions_TraceLevel {
    NO_TRACE = 0,
    SOFTWARE_TRACE = 1,
    HARDWARE_TRACE = 2,
    FULL_TRACE = 3,
}

impl ::protobuf::ProtobufEnum for RunOptions_TraceLevel {
    fn value(&self) -> i32 {
        *self as i32
    }

    fn from_i32(value: i32) -> ::std::option::Option<RunOptions_TraceLevel> {
        match value {
            0 => ::std::option::Option::Some(RunOptions_TraceLevel::NO_TRACE),
            1 => ::std::option::Option::Some(RunOptions_TraceLevel::SOFTWARE_TRACE),
            2 => ::std::option::Option::Some(RunOptions_TraceLevel::HARDWARE_TRACE),
            3 => ::std::option::Option::Some(RunOptions_TraceLevel::FULL_TRACE),
            _ => ::std::option::Option::None
        }
    }

    fn values() -> &'static [Self] {
        static values: &'static [RunOptions_TraceLevel] = &[
            RunOptions_TraceLevel::NO_TRACE,
            RunOptions_TraceLevel::SOFTWARE_TRACE,
            RunOptions_TraceLevel::HARDWARE_TRACE,
            RunOptions_TraceLevel::FULL_TRACE,
        ];
        values
    }

    fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::EnumDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                ::protobuf::reflect::EnumDescriptor::new_pb_name::<RunOptions_TraceLevel>("RunOptions.TraceLevel", file_descriptor_proto())
            })
        }
    }
}

impl ::std::marker::Copy for RunOptions_TraceLevel {
}

impl ::std::default::Default for RunOptions_TraceLevel {
    fn default() -> Self {
        RunOptions_TraceLevel::NO_TRACE
    }
}

impl ::protobuf::reflect::ProtobufValue for RunOptions_TraceLevel {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Enum(self.descriptor())
    }
}
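
// Illustrative sketch (not generated): round-tripping the trace level through
// its i32 wire value via the `ProtobufEnum` impl above. The function name
// `example_trace_level_roundtrip` is hypothetical.
#[allow(dead_code)]
fn example_trace_level_roundtrip() {
    let level = RunOptions_TraceLevel::HARDWARE_TRACE;
    let raw = level.value();
    assert_eq!(raw, 2);
    assert_eq!(
        RunOptions_TraceLevel::from_i32(raw),
        ::std::option::Option::Some(level)
    );
}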

#[derive(PartialEq,Clone,Default)]
pub struct RunMetadata {
    // message fields
    pub step_stats: ::protobuf::SingularPtrField<super::step_stats::StepStats>,
    pub cost_graph: ::protobuf::SingularPtrField<super::cost_graph::CostGraphDef>,
    pub partition_graphs: ::protobuf::RepeatedField<super::graph::GraphDef>,
    pub function_graphs: ::protobuf::RepeatedField<RunMetadata_FunctionGraphs>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RunMetadata {
    fn default() -> &'a RunMetadata {
        <RunMetadata as ::protobuf::Message>::default_instance()
    }
}

impl RunMetadata {
    pub fn new() -> RunMetadata {
        ::std::default::Default::default()
    }

    // .tensorflow.StepStats step_stats = 1;


    pub fn get_step_stats(&self) -> &super::step_stats::StepStats {
        self.step_stats.as_ref().unwrap_or_else(|| super::step_stats::StepStats::default_instance())
    }
    pub fn clear_step_stats(&mut self) {
        self.step_stats.clear();
    }

    pub fn has_step_stats(&self) -> bool {
        self.step_stats.is_some()
    }

    // Param is passed by value, moved
    pub fn set_step_stats(&mut self, v: super::step_stats::StepStats) {
        self.step_stats = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_step_stats(&mut self) -> &mut super::step_stats::StepStats {
        if self.step_stats.is_none() {
            self.step_stats.set_default();
        }
        self.step_stats.as_mut().unwrap()
    }

    // Take field
    pub fn take_step_stats(&mut self) -> super::step_stats::StepStats {
        self.step_stats.take().unwrap_or_else(|| super::step_stats::StepStats::new())
    }

    // .tensorflow.CostGraphDef cost_graph = 2;


    pub fn get_cost_graph(&self) -> &super::cost_graph::CostGraphDef {
        self.cost_graph.as_ref().unwrap_or_else(|| super::cost_graph::CostGraphDef::default_instance())
    }
    pub fn clear_cost_graph(&mut self) {
        self.cost_graph.clear();
    }

    pub fn has_cost_graph(&self) -> bool {
        self.cost_graph.is_some()
    }

    // Param is passed by value, moved
    pub fn set_cost_graph(&mut self, v: super::cost_graph::CostGraphDef) {
        self.cost_graph = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_cost_graph(&mut self) -> &mut super::cost_graph::CostGraphDef {
        if self.cost_graph.is_none() {
            self.cost_graph.set_default();
        }
        self.cost_graph.as_mut().unwrap()
    }

    // Take field
    pub fn take_cost_graph(&mut self) -> super::cost_graph::CostGraphDef {
        self.cost_graph.take().unwrap_or_else(|| super::cost_graph::CostGraphDef::new())
    }

    // repeated .tensorflow.GraphDef partition_graphs = 3;


    pub fn get_partition_graphs(&self) -> &[super::graph::GraphDef] {
        &self.partition_graphs
    }
    pub fn clear_partition_graphs(&mut self) {
        self.partition_graphs.clear();
    }

    // Param is passed by value, moved
    pub fn set_partition_graphs(&mut self, v: ::protobuf::RepeatedField<super::graph::GraphDef>) {
        self.partition_graphs = v;
    }

    // Mutable pointer to the field.
    pub fn mut_partition_graphs(&mut self) -> &mut ::protobuf::RepeatedField<super::graph::GraphDef> {
        &mut self.partition_graphs
    }

    // Take field
    pub fn take_partition_graphs(&mut self) -> ::protobuf::RepeatedField<super::graph::GraphDef> {
        ::std::mem::replace(&mut self.partition_graphs, ::protobuf::RepeatedField::new())
    }

    // repeated .tensorflow.RunMetadata.FunctionGraphs function_graphs = 4;


    pub fn get_function_graphs(&self) -> &[RunMetadata_FunctionGraphs] {
        &self.function_graphs
    }
    pub fn clear_function_graphs(&mut self) {
        self.function_graphs.clear();
    }

    // Param is passed by value, moved
    pub fn set_function_graphs(&mut self, v: ::protobuf::RepeatedField<RunMetadata_FunctionGraphs>) {
        self.function_graphs = v;
    }

    // Mutable pointer to the field.
    pub fn mut_function_graphs(&mut self) -> &mut ::protobuf::RepeatedField<RunMetadata_FunctionGraphs> {
        &mut self.function_graphs
    }

    // Take field
    pub fn take_function_graphs(&mut self) -> ::protobuf::RepeatedField<RunMetadata_FunctionGraphs> {
        ::std::mem::replace(&mut self.function_graphs, ::protobuf::RepeatedField::new())
    }
}

impl ::protobuf::Message for RunMetadata {
    fn is_initialized(&self) -> bool {
        for v in &self.step_stats {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.cost_graph {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.partition_graphs {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.function_graphs {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.step_stats)?;
                },
                2 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.cost_graph)?;
                },
                3 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.partition_graphs)?;
                },
                4 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.function_graphs)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if let Some(ref v) = self.step_stats.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if let Some(ref v) = self.cost_graph.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        for value in &self.partition_graphs {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        for value in &self.function_graphs {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if let Some(ref v) = self.step_stats.as_ref() {
            os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if let Some(ref v) = self.cost_graph.as_ref() {
            os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        for v in &self.partition_graphs {
            os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        for v in &self.function_graphs {
            os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RunMetadata {
        RunMetadata::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::step_stats::StepStats>>(
                    "step_stats",
                    |m: &RunMetadata| { &m.step_stats },
                    |m: &mut RunMetadata| { &mut m.step_stats },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::cost_graph::CostGraphDef>>(
                    "cost_graph",
                    |m: &RunMetadata| { &m.cost_graph },
                    |m: &mut RunMetadata| { &mut m.cost_graph },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
                    "partition_graphs",
                    |m: &RunMetadata| { &m.partition_graphs },
                    |m: &mut RunMetadata| { &mut m.partition_graphs },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunMetadata_FunctionGraphs>>(
                    "function_graphs",
                    |m: &RunMetadata| { &m.function_graphs },
                    |m: &mut RunMetadata| { &mut m.function_graphs },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RunMetadata>(
                    "RunMetadata",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RunMetadata {
        static mut instance: ::protobuf::lazy::Lazy<RunMetadata> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RunMetadata::new)
        }
    }
}

impl ::protobuf::Clear for RunMetadata {
    fn clear(&mut self) {
        self.step_stats.clear();
        self.cost_graph.clear();
        self.partition_graphs.clear();
        self.function_graphs.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RunMetadata {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RunMetadata {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
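
// Illustrative sketch (not generated): reading the collected graphs out of a
// `RunMetadata` value obtained elsewhere (e.g. parsed from a server reply).
// The function name `example_inspect_run_metadata` is hypothetical.
#[allow(dead_code)]
fn example_inspect_run_metadata(metadata: &RunMetadata) -> usize {
    // Repeated message fields are exposed as slices by the generated getters,
    // while an unset singular field falls back to its default instance.
    let _stats = metadata.get_step_stats();
    metadata.get_partition_graphs().len() + metadata.get_function_graphs().len()
}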

#[derive(PartialEq,Clone,Default)]
pub struct RunMetadata_FunctionGraphs {
    // message fields
    pub partition_graphs: ::protobuf::RepeatedField<super::graph::GraphDef>,
    pub pre_optimization_graph: ::protobuf::SingularPtrField<super::graph::GraphDef>,
    pub post_optimization_graph: ::protobuf::SingularPtrField<super::graph::GraphDef>,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a RunMetadata_FunctionGraphs {
    fn default() -> &'a RunMetadata_FunctionGraphs {
        <RunMetadata_FunctionGraphs as ::protobuf::Message>::default_instance()
    }
}

impl RunMetadata_FunctionGraphs {
    pub fn new() -> RunMetadata_FunctionGraphs {
        ::std::default::Default::default()
    }

    // repeated .tensorflow.GraphDef partition_graphs = 1;


    pub fn get_partition_graphs(&self) -> &[super::graph::GraphDef] {
        &self.partition_graphs
    }
    pub fn clear_partition_graphs(&mut self) {
        self.partition_graphs.clear();
    }

    // Param is passed by value, moved
    pub fn set_partition_graphs(&mut self, v: ::protobuf::RepeatedField<super::graph::GraphDef>) {
        self.partition_graphs = v;
    }

    // Mutable pointer to the field.
    pub fn mut_partition_graphs(&mut self) -> &mut ::protobuf::RepeatedField<super::graph::GraphDef> {
        &mut self.partition_graphs
    }

    // Take field
    pub fn take_partition_graphs(&mut self) -> ::protobuf::RepeatedField<super::graph::GraphDef> {
        ::std::mem::replace(&mut self.partition_graphs, ::protobuf::RepeatedField::new())
    }

    // .tensorflow.GraphDef pre_optimization_graph = 2;


    pub fn get_pre_optimization_graph(&self) -> &super::graph::GraphDef {
        self.pre_optimization_graph.as_ref().unwrap_or_else(|| super::graph::GraphDef::default_instance())
    }
    pub fn clear_pre_optimization_graph(&mut self) {
        self.pre_optimization_graph.clear();
    }

    pub fn has_pre_optimization_graph(&self) -> bool {
        self.pre_optimization_graph.is_some()
    }

    // Param is passed by value, moved
    pub fn set_pre_optimization_graph(&mut self, v: super::graph::GraphDef) {
        self.pre_optimization_graph = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_pre_optimization_graph(&mut self) -> &mut super::graph::GraphDef {
        if self.pre_optimization_graph.is_none() {
            self.pre_optimization_graph.set_default();
        }
        self.pre_optimization_graph.as_mut().unwrap()
    }

    // Take field
    pub fn take_pre_optimization_graph(&mut self) -> super::graph::GraphDef {
        self.pre_optimization_graph.take().unwrap_or_else(|| super::graph::GraphDef::new())
    }

    // .tensorflow.GraphDef post_optimization_graph = 3;


    pub fn get_post_optimization_graph(&self) -> &super::graph::GraphDef {
        self.post_optimization_graph.as_ref().unwrap_or_else(|| super::graph::GraphDef::default_instance())
    }
    pub fn clear_post_optimization_graph(&mut self) {
        self.post_optimization_graph.clear();
    }

    pub fn has_post_optimization_graph(&self) -> bool {
        self.post_optimization_graph.is_some()
    }

    // Param is passed by value, moved
    pub fn set_post_optimization_graph(&mut self, v: super::graph::GraphDef) {
        self.post_optimization_graph = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_post_optimization_graph(&mut self) -> &mut super::graph::GraphDef {
        if self.post_optimization_graph.is_none() {
            self.post_optimization_graph.set_default();
        }
        self.post_optimization_graph.as_mut().unwrap()
    }

    // Take field
    pub fn take_post_optimization_graph(&mut self) -> super::graph::GraphDef {
        self.post_optimization_graph.take().unwrap_or_else(|| super::graph::GraphDef::new())
    }
}

impl ::protobuf::Message for RunMetadata_FunctionGraphs {
    fn is_initialized(&self) -> bool {
        for v in &self.partition_graphs {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.pre_optimization_graph {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.post_optimization_graph {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.partition_graphs)?;
                },
                2 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.pre_optimization_graph)?;
                },
                3 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.post_optimization_graph)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        for value in &self.partition_graphs {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        if let Some(ref v) = self.pre_optimization_graph.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        if let Some(ref v) = self.post_optimization_graph.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        for v in &self.partition_graphs {
            os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        if let Some(ref v) = self.pre_optimization_graph.as_ref() {
            os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        if let Some(ref v) = self.post_optimization_graph.as_ref() {
            os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> RunMetadata_FunctionGraphs {
        RunMetadata_FunctionGraphs::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
                    "partition_graphs",
                    |m: &RunMetadata_FunctionGraphs| { &m.partition_graphs },
                    |m: &mut RunMetadata_FunctionGraphs| { &mut m.partition_graphs },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
                    "pre_optimization_graph",
                    |m: &RunMetadata_FunctionGraphs| { &m.pre_optimization_graph },
                    |m: &mut RunMetadata_FunctionGraphs| { &mut m.pre_optimization_graph },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
                    "post_optimization_graph",
                    |m: &RunMetadata_FunctionGraphs| { &m.post_optimization_graph },
                    |m: &mut RunMetadata_FunctionGraphs| { &mut m.post_optimization_graph },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<RunMetadata_FunctionGraphs>(
                    "RunMetadata.FunctionGraphs",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static RunMetadata_FunctionGraphs {
        static mut instance: ::protobuf::lazy::Lazy<RunMetadata_FunctionGraphs> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(RunMetadata_FunctionGraphs::new)
        }
    }
}

impl ::protobuf::Clear for RunMetadata_FunctionGraphs {
    fn clear(&mut self) {
        self.partition_graphs.clear();
        self.pre_optimization_graph.clear();
        self.post_optimization_graph.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for RunMetadata_FunctionGraphs {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for RunMetadata_FunctionGraphs {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
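
// Illustrative sketch (not generated): using the `has_*` predicates above to
// check the optional per-function graphs before reading them. The function
// name `example_has_optimized_graphs` is hypothetical.
#[allow(dead_code)]
fn example_has_optimized_graphs(entry: &RunMetadata_FunctionGraphs) -> bool {
    entry.has_pre_optimization_graph() && entry.has_post_optimization_graph()
}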

#[derive(PartialEq,Clone,Default)]
pub struct TensorConnection {
    // message fields
    pub from_tensor: ::std::string::String,
    pub to_tensor: ::std::string::String,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a TensorConnection {
    fn default() -> &'a TensorConnection {
        <TensorConnection as ::protobuf::Message>::default_instance()
    }
}

impl TensorConnection {
    pub fn new() -> TensorConnection {
        ::std::default::Default::default()
    }

    // string from_tensor = 1;


    pub fn get_from_tensor(&self) -> &str {
        &self.from_tensor
    }
    pub fn clear_from_tensor(&mut self) {
        self.from_tensor.clear();
    }

    // Param is passed by value, moved
    pub fn set_from_tensor(&mut self, v: ::std::string::String) {
        self.from_tensor = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_from_tensor(&mut self) -> &mut ::std::string::String {
        &mut self.from_tensor
    }

    // Take field
    pub fn take_from_tensor(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.from_tensor, ::std::string::String::new())
    }

    // string to_tensor = 2;


    pub fn get_to_tensor(&self) -> &str {
        &self.to_tensor
    }
    pub fn clear_to_tensor(&mut self) {
        self.to_tensor.clear();
    }

    // Param is passed by value, moved
    pub fn set_to_tensor(&mut self, v: ::std::string::String) {
        self.to_tensor = v;
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_to_tensor(&mut self) -> &mut ::std::string::String {
        &mut self.to_tensor
    }

    // Take field
    pub fn take_to_tensor(&mut self) -> ::std::string::String {
        ::std::mem::replace(&mut self.to_tensor, ::std::string::String::new())
    }
}

impl ::protobuf::Message for TensorConnection {
    fn is_initialized(&self) -> bool {
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.from_tensor)?;
                },
                2 => {
                    ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.to_tensor)?;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute sizes of nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        if !self.from_tensor.is_empty() {
            my_size += ::protobuf::rt::string_size(1, &self.from_tensor);
        }
        if !self.to_tensor.is_empty() {
            my_size += ::protobuf::rt::string_size(2, &self.to_tensor);
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        if !self.from_tensor.is_empty() {
            os.write_string(1, &self.from_tensor)?;
        }
        if !self.to_tensor.is_empty() {
            os.write_string(2, &self.to_tensor)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> TensorConnection {
        TensorConnection::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "from_tensor",
                    |m: &TensorConnection| { &m.from_tensor },
                    |m: &mut TensorConnection| { &mut m.from_tensor },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "to_tensor",
                    |m: &TensorConnection| { &m.to_tensor },
                    |m: &mut TensorConnection| { &mut m.to_tensor },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<TensorConnection>(
                    "TensorConnection",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static TensorConnection {
        static mut instance: ::protobuf::lazy::Lazy<TensorConnection> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(TensorConnection::new)
        }
    }
}

impl ::protobuf::Clear for TensorConnection {
    fn clear(&mut self) {
        self.from_tensor.clear();
        self.to_tensor.clear();
        self.unknown_fields.clear();
    }
}

impl ::std::fmt::Debug for TensorConnection {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for TensorConnection {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
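
// Illustrative sketch (not generated): wiring one tensor endpoint to another
// with the string setters above; the tensor names are placeholders and the
// function name `example_tensor_connection` is hypothetical.
#[allow(dead_code)]
fn example_tensor_connection() -> TensorConnection {
    let mut conn = TensorConnection::new();
    conn.set_from_tensor("output:0".to_string());
    conn.set_to_tensor("input:0".to_string());
    conn
}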

#[derive(PartialEq,Clone,Default)]
pub struct CallableOptions {
    // message fields
    pub feed: ::protobuf::RepeatedField<::std::string::String>,
    pub fetch: ::protobuf::RepeatedField<::std::string::String>,
    pub target: ::protobuf::RepeatedField<::std::string::String>,
    pub run_options: ::protobuf::SingularPtrField<RunOptions>,
    pub tensor_connection: ::protobuf::RepeatedField<TensorConnection>,
    pub feed_devices: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
    pub fetch_devices: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
    pub fetch_skip_sync: bool,
    // special fields
    pub unknown_fields: ::protobuf::UnknownFields,
    pub cached_size: ::protobuf::CachedSize,
}

impl<'a> ::std::default::Default for &'a CallableOptions {
    fn default() -> &'a CallableOptions {
        <CallableOptions as ::protobuf::Message>::default_instance()
    }
}

impl CallableOptions {
    pub fn new() -> CallableOptions {
        ::std::default::Default::default()
    }

    // repeated string feed = 1;


    pub fn get_feed(&self) -> &[::std::string::String] {
        &self.feed
    }
    pub fn clear_feed(&mut self) {
        self.feed.clear();
    }

    // Param is passed by value, moved
    pub fn set_feed(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
        self.feed = v;
    }

    // Mutable pointer to the field.
    pub fn mut_feed(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
        &mut self.feed
    }

    // Take field
    pub fn take_feed(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
        ::std::mem::replace(&mut self.feed, ::protobuf::RepeatedField::new())
    }

    // repeated string fetch = 2;


    pub fn get_fetch(&self) -> &[::std::string::String] {
        &self.fetch
    }
    pub fn clear_fetch(&mut self) {
        self.fetch.clear();
    }

    // Param is passed by value, moved
    pub fn set_fetch(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
        self.fetch = v;
    }

    // Mutable pointer to the field.
    pub fn mut_fetch(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
        &mut self.fetch
    }

    // Take field
    pub fn take_fetch(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
        ::std::mem::replace(&mut self.fetch, ::protobuf::RepeatedField::new())
    }

    // repeated string target = 3;


    pub fn get_target(&self) -> &[::std::string::String] {
        &self.target
    }
    pub fn clear_target(&mut self) {
        self.target.clear();
    }

    // Param is passed by value, moved
    pub fn set_target(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
        self.target = v;
    }

    // Mutable pointer to the field.
    pub fn mut_target(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
        &mut self.target
    }

    // Take field
    pub fn take_target(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
        ::std::mem::replace(&mut self.target, ::protobuf::RepeatedField::new())
    }

    // .tensorflow.RunOptions run_options = 4;


    pub fn get_run_options(&self) -> &RunOptions {
        self.run_options.as_ref().unwrap_or_else(|| RunOptions::default_instance())
    }
    pub fn clear_run_options(&mut self) {
        self.run_options.clear();
    }

    pub fn has_run_options(&self) -> bool {
        self.run_options.is_some()
    }

    // Param is passed by value, moved
    pub fn set_run_options(&mut self, v: RunOptions) {
        self.run_options = ::protobuf::SingularPtrField::some(v);
    }

    // Mutable pointer to the field.
    // If field is not initialized, it is initialized with default value first.
    pub fn mut_run_options(&mut self) -> &mut RunOptions {
        if self.run_options.is_none() {
            self.run_options.set_default();
        }
        self.run_options.as_mut().unwrap()
    }

    // Take field
    pub fn take_run_options(&mut self) -> RunOptions {
        self.run_options.take().unwrap_or_else(|| RunOptions::new())
    }

    // repeated .tensorflow.TensorConnection tensor_connection = 5;


    pub fn get_tensor_connection(&self) -> &[TensorConnection] {
        &self.tensor_connection
    }
    pub fn clear_tensor_connection(&mut self) {
        self.tensor_connection.clear();
    }

    // Param is passed by value, moved
    pub fn set_tensor_connection(&mut self, v: ::protobuf::RepeatedField<TensorConnection>) {
        self.tensor_connection = v;
    }

    // Mutable pointer to the field.
    pub fn mut_tensor_connection(&mut self) -> &mut ::protobuf::RepeatedField<TensorConnection> {
        &mut self.tensor_connection
    }

    // Take field
    pub fn take_tensor_connection(&mut self) -> ::protobuf::RepeatedField<TensorConnection> {
        ::std::mem::replace(&mut self.tensor_connection, ::protobuf::RepeatedField::new())
    }

    // repeated .tensorflow.CallableOptions.FeedDevicesEntry feed_devices = 6;


    pub fn get_feed_devices(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
        &self.feed_devices
    }
    pub fn clear_feed_devices(&mut self) {
        self.feed_devices.clear();
    }

    // Param is passed by value, moved
    pub fn set_feed_devices(&mut self, v: ::std::collections::HashMap<::std::string::String, ::std::string::String>) {
        self.feed_devices = v;
    }

    // Mutable pointer to the field.
    pub fn mut_feed_devices(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, ::std::string::String> {
        &mut self.feed_devices
    }

    // Take field
    pub fn take_feed_devices(&mut self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
        ::std::mem::replace(&mut self.feed_devices, ::std::collections::HashMap::new())
    }

    // repeated .tensorflow.CallableOptions.FetchDevicesEntry fetch_devices = 7;


    pub fn get_fetch_devices(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
        &self.fetch_devices
    }
    pub fn clear_fetch_devices(&mut self) {
        self.fetch_devices.clear();
    }

    // Param is passed by value, moved
    pub fn set_fetch_devices(&mut self, v: ::std::collections::HashMap<::std::string::String, ::std::string::String>) {
        self.fetch_devices = v;
    }

    // Mutable pointer to the field.
    pub fn mut_fetch_devices(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, ::std::string::String> {
        &mut self.fetch_devices
    }

    // Take field
    pub fn take_fetch_devices(&mut self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
        ::std::mem::replace(&mut self.fetch_devices, ::std::collections::HashMap::new())
    }

    // bool fetch_skip_sync = 8;


    pub fn get_fetch_skip_sync(&self) -> bool {
        self.fetch_skip_sync
    }
    pub fn clear_fetch_skip_sync(&mut self) {
        self.fetch_skip_sync = false;
    }

    // Param is passed by value, moved
    pub fn set_fetch_skip_sync(&mut self, v: bool) {
        self.fetch_skip_sync = v;
    }
}
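
// Illustrative usage sketch (not part of the generated output): it combines the
// accessors above to describe a callable that feeds one tensor, fetches another,
// and pins the fetched tensor to a device. The tensor and device names are
// placeholders; only the generated API in this file and the rust-protobuf 2.x
// runtime are assumed.
#[allow(dead_code)]
fn build_callable_options_example() -> CallableOptions {
    let mut options = CallableOptions::new();
    // Repeated string fields are `RepeatedField<String>` behind `mut_*()`.
    options.mut_feed().push("input:0".to_string());
    options.mut_fetch().push("output:0".to_string());
    options.mut_target().push("init_op".to_string());

    // Singular message field: attach optional per-callable RunOptions.
    let mut run_options = RunOptions::new();
    run_options.set_timeout_in_ms(5_000);
    options.set_run_options(run_options);

    // Map fields are plain `HashMap<String, String>` in the generated API.
    options
        .mut_fetch_devices()
        .insert("output:0".to_string(), "/device:CPU:0".to_string());
    options.set_fetch_skip_sync(true);
    options
}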

impl ::protobuf::Message for CallableOptions {
    fn is_initialized(&self) -> bool {
        for v in &self.run_options {
            if !v.is_initialized() {
                return false;
            }
        };
        for v in &self.tensor_connection {
            if !v.is_initialized() {
                return false;
            }
        };
        true
    }

    fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        while !is.eof()? {
            let (field_number, wire_type) = is.read_tag_unpack()?;
            match field_number {
                1 => {
                    ::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.feed)?;
                },
                2 => {
                    ::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.fetch)?;
                },
                3 => {
                    ::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.target)?;
                },
                4 => {
                    ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.run_options)?;
                },
                5 => {
                    ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.tensor_connection)?;
                },
                6 => {
                    ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.feed_devices)?;
                },
                7 => {
                    ::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.fetch_devices)?;
                },
                8 => {
                    if wire_type != ::protobuf::wire_format::WireTypeVarint {
                        return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
                    }
                    let tmp = is.read_bool()?;
                    self.fetch_skip_sync = tmp;
                },
                _ => {
                    ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
                },
            };
        }
        ::std::result::Result::Ok(())
    }

    // Compute the serialized size of this message, including nested messages
    #[allow(unused_variables)]
    fn compute_size(&self) -> u32 {
        let mut my_size = 0;
        for value in &self.feed {
            my_size += ::protobuf::rt::string_size(1, &value);
        };
        for value in &self.fetch {
            my_size += ::protobuf::rt::string_size(2, &value);
        };
        for value in &self.target {
            my_size += ::protobuf::rt::string_size(3, &value);
        };
        if let Some(ref v) = self.run_options.as_ref() {
            let len = v.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        }
        for value in &self.tensor_connection {
            let len = value.compute_size();
            my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
        };
        my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(6, &self.feed_devices);
        my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(7, &self.fetch_devices);
        if self.fetch_skip_sync != false {
            my_size += 2;
        }
        my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
        self.cached_size.set(my_size);
        my_size
    }

    fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
        for v in &self.feed {
            os.write_string(1, &v)?;
        };
        for v in &self.fetch {
            os.write_string(2, &v)?;
        };
        for v in &self.target {
            os.write_string(3, &v)?;
        };
        if let Some(ref v) = self.run_options.as_ref() {
            os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        }
        for v in &self.tensor_connection {
            os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
            os.write_raw_varint32(v.get_cached_size())?;
            v.write_to_with_cached_sizes(os)?;
        };
        ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(6, &self.feed_devices, os)?;
        ::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(7, &self.fetch_devices, os)?;
        if self.fetch_skip_sync != false {
            os.write_bool(8, self.fetch_skip_sync)?;
        }
        os.write_unknown_fields(self.get_unknown_fields())?;
        ::std::result::Result::Ok(())
    }

    fn get_cached_size(&self) -> u32 {
        self.cached_size.get()
    }

    fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
        &self.unknown_fields
    }

    fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
        &mut self.unknown_fields
    }

    fn as_any(&self) -> &dyn (::std::any::Any) {
        self as &dyn (::std::any::Any)
    }
    fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
        self as &mut dyn (::std::any::Any)
    }
    fn into_any(self: Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
        self
    }

    fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
        Self::descriptor_static()
    }

    fn new() -> CallableOptions {
        CallableOptions::new()
    }

    fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
        static mut descriptor: ::protobuf::lazy::Lazy<::protobuf::reflect::MessageDescriptor> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            descriptor.get(|| {
                let mut fields = ::std::vec::Vec::new();
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "feed",
                    |m: &CallableOptions| { &m.feed },
                    |m: &mut CallableOptions| { &mut m.feed },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "fetch",
                    |m: &CallableOptions| { &m.fetch },
                    |m: &mut CallableOptions| { &mut m.fetch },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
                    "target",
                    |m: &CallableOptions| { &m.target },
                    |m: &mut CallableOptions| { &mut m.target },
                ));
                fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<RunOptions>>(
                    "run_options",
                    |m: &CallableOptions| { &m.run_options },
                    |m: &mut CallableOptions| { &mut m.run_options },
                ));
                fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<TensorConnection>>(
                    "tensor_connection",
                    |m: &CallableOptions| { &m.tensor_connection },
                    |m: &mut CallableOptions| { &mut m.tensor_connection },
                ));
                fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(
                    "feed_devices",
                    |m: &CallableOptions| { &m.feed_devices },
                    |m: &mut CallableOptions| { &mut m.feed_devices },
                ));
                fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(
                    "fetch_devices",
                    |m: &CallableOptions| { &m.fetch_devices },
                    |m: &mut CallableOptions| { &mut m.fetch_devices },
                ));
                fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
                    "fetch_skip_sync",
                    |m: &CallableOptions| { &m.fetch_skip_sync },
                    |m: &mut CallableOptions| { &mut m.fetch_skip_sync },
                ));
                ::protobuf::reflect::MessageDescriptor::new_pb_name::<CallableOptions>(
                    "CallableOptions",
                    fields,
                    file_descriptor_proto()
                )
            })
        }
    }

    fn default_instance() -> &'static CallableOptions {
        static mut instance: ::protobuf::lazy::Lazy<CallableOptions> = ::protobuf::lazy::Lazy::INIT;
        unsafe {
            instance.get(CallableOptions::new)
        }
    }
}
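
// Minimal serialization round trip (illustrative only): it assumes the
// rust-protobuf 2.x runtime entry points `Message::write_to_bytes` and
// `protobuf::parse_from_bytes` that this generated code is built against.
#[allow(dead_code)]
fn callable_options_roundtrip_example() -> ::protobuf::ProtobufResult<()> {
    let mut options = CallableOptions::new();
    options.mut_feed().push("x:0".to_string());
    options.mut_fetch().push("y:0".to_string());

    // Encode to the protobuf wire format and decode it back.
    let bytes = options.write_to_bytes()?;
    let decoded: CallableOptions = ::protobuf::parse_from_bytes(&bytes)?;
    assert_eq!(options, decoded);
    Ok(())
}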

impl ::protobuf::Clear for CallableOptions {
    fn clear(&mut self) {
        self.feed.clear();
        self.fetch.clear();
        self.target.clear();
        self.run_options.clear();
        self.tensor_connection.clear();
        self.feed_devices.clear();
        self.fetch_devices.clear();
        self.fetch_skip_sync = false;
        self.unknown_fields.clear();
    }
}
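
// Sketch (illustrative): `Clear::clear` resets every field to its default, so a
// message allocated once can be reused across requests without reallocation.
#[allow(dead_code)]
fn reuse_callable_options_example(options: &mut CallableOptions) {
    ::protobuf::Clear::clear(options);
    options.mut_feed().push("next_input:0".to_string());
}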

impl ::std::fmt::Debug for CallableOptions {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        ::protobuf::text_format::fmt(self, f)
    }
}

impl ::protobuf::reflect::ProtobufValue for CallableOptions {
    fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
        ::protobuf::reflect::ReflectValueRef::Message(self)
    }
}
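
// Sketch (illustrative): the `Debug` impl above renders the message in protobuf
// text format, and the reflective descriptor exposes the proto message name, so
// a populated message can be logged in a readable form. Assumes the 2.x
// `MessageDescriptor::name` accessor.
#[allow(dead_code)]
fn describe_callable_options_example(options: &CallableOptions) -> String {
    let proto_name = options.descriptor().name();
    format!("{}: {:?}", proto_name, options)
}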

static file_descriptor_proto_data: &'static [u8] = b"\
    \n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/\
    core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.prot\
    o\x1a*tensorflow/core/framework/step_stats.proto\x1a&tensorflow/core/pro\
    tobuf/cluster.proto\x1a$tensorflow/core/protobuf/debug.proto\x1a.tensorf\
    low/core/protobuf/rewriter_config.proto\"\xca\x08\n\nGPUOptions\x12D\n\
    \x1fper_process_gpu_memory_fraction\x18\x01\x20\x01(\x01R\x1bperProcessG\
    puMemoryFraction\x12!\n\x0callow_growth\x18\x04\x20\x01(\x08R\x0ballowGr\
    owth\x12%\n\x0eallocator_type\x18\x02\x20\x01(\tR\rallocatorType\x126\n\
    \x17deferred_deletion_bytes\x18\x03\x20\x01(\x03R\x15deferredDeletionByt\
    es\x12.\n\x13visible_device_list\x18\x05\x20\x01(\tR\x11visibleDeviceLis\
    t\x12;\n\x1apolling_active_delay_usecs\x18\x06\x20\x01(\x05R\x17pollingA\
    ctiveDelayUsecs\x12?\n\x1cpolling_inactive_delay_msecs\x18\x07\x20\x01(\
    \x05R\x19pollingInactiveDelayMsecs\x120\n\x14force_gpu_compatible\x18\
    \x08\x20\x01(\x08R\x12forceGpuCompatible\x12G\n\x0cexperimental\x18\t\
    \x20\x01(\x0b2#.tensorflow.GPUOptions.ExperimentalR\x0cexperimental\x1a\
    \xca\x04\n\x0cExperimental\x12[\n\x0fvirtual_devices\x18\x01\x20\x03(\
    \x0b22.tensorflow.GPUOptions.Experimental.VirtualDevicesR\x0evirtualDevi\
    ces\x12,\n\x12use_unified_memory\x18\x02\x20\x01(\x08R\x10useUnifiedMemo\
    ry\x12;\n\x1bnum_dev_to_dev_copy_streams\x18\x03\x20\x01(\x05R\x16numDev\
    ToDevCopyStreams\x122\n\x15collective_ring_order\x18\x04\x20\x01(\tR\x13\
    collectiveRingOrder\x123\n\x15timestamped_allocator\x18\x05\x20\x01(\x08\
    R\x14timestampedAllocator\x12=\n\x1bkernel_tracker_max_interval\x18\x07\
    \x20\x01(\x05R\x18kernelTrackerMaxInterval\x127\n\x18kernel_tracker_max_\
    bytes\x18\x08\x20\x01(\x05R\x15kernelTrackerMaxBytes\x12;\n\x1akernel_tr\
    acker_max_pending\x18\t\x20\x01(\x05R\x17kernelTrackerMaxPending\x1aT\n\
    \x0eVirtualDevices\x12&\n\x0fmemory_limit_mb\x18\x01\x20\x03(\x02R\rmemo\
    ryLimitMb\x12\x1a\n\x08priority\x18\x02\x20\x03(\x05R\x08priority\"\x82\
    \x04\n\x10OptimizerOptions\x12M\n#do_common_subexpression_elimination\
    \x18\x01\x20\x01(\x08R\x20doCommonSubexpressionElimination\x12.\n\x13do_\
    constant_folding\x18\x02\x20\x01(\x08R\x11doConstantFolding\x12>\n\x1cma\
    x_folded_constant_in_bytes\x18\x06\x20\x01(\x03R\x18maxFoldedConstantInB\
    ytes\x120\n\x14do_function_inlining\x18\x04\x20\x01(\x08R\x12doFunctionI\
    nlining\x12?\n\topt_level\x18\x03\x20\x01(\x0e2\".tensorflow.OptimizerOp\
    tions.LevelR\x08optLevel\x12U\n\x10global_jit_level\x18\x05\x20\x01(\x0e\
    2+.tensorflow.OptimizerOptions.GlobalJitLevelR\x0eglobalJitLevel\"\x20\n\
    \x05Level\x12\x06\n\x02L1\x10\0\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\
    \xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07DEFAULT\x10\0\
    \x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\
    \x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\x90\x04\n\x0cGraphOptions\
    \x124\n\x16enable_recv_scheduling\x18\x02\x20\x01(\x08R\x14enableRecvSch\
    eduling\x12I\n\x11optimizer_options\x18\x03\x20\x01(\x0b2\x1c.tensorflow\
    .OptimizerOptionsR\x10optimizerOptions\x12(\n\x10build_cost_model\x18\
    \x04\x20\x01(\x03R\x0ebuildCostModel\x123\n\x16build_cost_model_after\
    \x18\t\x20\x01(\x03R\x13buildCostModelAfter\x12!\n\x0cinfer_shapes\x18\
    \x05\x20\x01(\x08R\x0binferShapes\x12,\n\x12place_pruned_graph\x18\x06\
    \x20\x01(\x08R\x10placePrunedGraph\x128\n\x18enable_bfloat16_sendrecv\
    \x18\x07\x20\x01(\x08R\x16enableBfloat16Sendrecv\x12#\n\rtimeline_step\
    \x18\x08\x20\x01(\x05R\x0ctimelineStep\x12C\n\x0frewrite_options\x18\n\
    \x20\x01(\x0b2\x1a.tensorflow.RewriterConfigR\x0erewriteOptionsJ\x04\x08\
    \x01\x10\x02R%skip_common_subexpression_elimination\"Y\n\x15ThreadPoolOp\
    tionProto\x12\x1f\n\x0bnum_threads\x18\x01\x20\x01(\x05R\nnumThreads\x12\
    \x1f\n\x0bglobal_name\x18\x02\x20\x01(\tR\nglobalName\"\xa9\x02\n\nRPCOp\
    tions\x12>\n\x1cuse_rpc_for_inprocess_master\x18\x01\x20\x01(\x08R\x18us\
    eRpcForInprocessMaster\x123\n\x15compression_algorithm\x18\x02\x20\x01(\
    \tR\x14compressionAlgorithm\x12+\n\x11compression_level\x18\x03\x20\x01(\
    \x05R\x10compressionLevel\x12,\n\x12cache_rpc_response\x18\x04\x20\x01(\
    \x08R\x10cacheRpcResponse\x12K\n\"disable_session_connection_sharing\x18\
    \x05\x20\x01(\x08R\x1fdisableSessionConnectionSharing\"?\n\x0fSessionMet\
    adata\x12\x12\n\x04name\x18\x01\x20\x01(\tR\x04name\x12\x18\n\x07version\
    \x18\x02\x20\x01(\x03R\x07version\"\x8b\x10\n\x0bConfigProto\x12K\n\x0cd\
    evice_count\x18\x01\x20\x03(\x0b2(.tensorflow.ConfigProto.DeviceCountEnt\
    ryR\x0bdeviceCount\x12?\n\x1cintra_op_parallelism_threads\x18\x02\x20\
    \x01(\x05R\x19intraOpParallelismThreads\x12?\n\x1cinter_op_parallelism_t\
    hreads\x18\x05\x20\x01(\x05R\x19interOpParallelismThreads\x125\n\x17use_\
    per_session_threads\x18\t\x20\x01(\x08R\x14usePerSessionThreads\x12a\n\
    \x1csession_inter_op_thread_pool\x18\x0c\x20\x03(\x0b2!.tensorflow.Threa\
    dPoolOptionProtoR\x18sessionInterOpThreadPool\x12)\n\x10placement_period\
    \x18\x03\x20\x01(\x05R\x0fplacementPeriod\x12%\n\x0edevice_filters\x18\
    \x04\x20\x03(\tR\rdeviceFilters\x127\n\x0bgpu_options\x18\x06\x20\x01(\
    \x0b2\x16.tensorflow.GPUOptionsR\ngpuOptions\x120\n\x14allow_soft_placem\
    ent\x18\x07\x20\x01(\x08R\x12allowSoftPlacement\x120\n\x14log_device_pla\
    cement\x18\x08\x20\x01(\x08R\x12logDevicePlacement\x12=\n\rgraph_options\
    \x18\n\x20\x01(\x0b2\x18.tensorflow.GraphOptionsR\x0cgraphOptions\x125\n\
    \x17operation_timeout_in_ms\x18\x0b\x20\x01(\x03R\x14operationTimeoutInM\
    s\x127\n\x0brpc_options\x18\r\x20\x01(\x0b2\x16.tensorflow.RPCOptionsR\n\
    rpcOptions\x127\n\x0bcluster_def\x18\x0e\x20\x01(\x0b2\x16.tensorflow.Cl\
    usterDefR\nclusterDef\x122\n\x15isolate_session_state\x18\x0f\x20\x01(\
    \x08R\x13isolateSessionState\x12F\n\x20share_cluster_devices_in_session\
    \x18\x11\x20\x01(\x08R\x1cshareClusterDevicesInSession\x12H\n\x0cexperim\
    ental\x18\x10\x20\x01(\x0b2$.tensorflow.ConfigProto.ExperimentalR\x0cexp\
    erimental\x1a>\n\x10DeviceCountEntry\x12\x10\n\x03key\x18\x01\x20\x01(\t\
    R\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\x05R\x05value:\x028\x01\x1a\
    \xb5\x07\n\x0cExperimental\x126\n\x17collective_group_leader\x18\x01\x20\
    \x01(\tR\x15collectiveGroupLeader\x12#\n\rexecutor_type\x18\x03\x20\x01(\
    \tR\x0cexecutorType\x12+\n\x12recv_buf_max_chunk\x18\x04\x20\x01(\x05R\
    \x0frecvBufMaxChunk\x12*\n\x11use_numa_affinity\x18\x05\x20\x01(\x08R\
    \x0fuseNumaAffinity\x12a\n-collective_deterministic_sequential_execution\
    \x18\x06\x20\x01(\x08R*collectiveDeterministicSequentialExecution\x12'\n\
    \x0fcollective_nccl\x18\x07\x20\x01(\x08R\x0ecollectiveNccl\x12a\n.share\
    _session_state_in_clusterspec_propagation\x18\x08\x20\x01(\x08R)shareSes\
    sionStateInClusterspecPropagation\x126\n\x17disable_thread_spinning\x18\
    \t\x20\x01(\x08R\x15disableThreadSpinning\x12F\n\x20share_cluster_device\
    s_in_session\x18\n\x20\x01(\x08R\x1cshareClusterDevicesInSession\x12F\n\
    \x10session_metadata\x18\x0b\x20\x01(\x0b2\x1b.tensorflow.SessionMetadat\
    aR\x0fsessionMetadata\x129\n\x19optimize_for_static_graph\x18\x0c\x20\
    \x01(\x08R\x16optimizeForStaticGraph\x12,\n\x12enable_mlir_bridge\x18\r\
    \x20\x01(\x08R\x10enableMlirBridge\x12C\n\x1eenable_mlir_graph_optimizat\
    ion\x18\x10\x20\x01(\x08R\x1benableMlirGraphOptimization\x12E\n\x1fdisab\
    le_output_partition_graphs\x18\x0e\x20\x01(\x08R\x1cdisableOutputPartiti\
    onGraphs\x12=\n\x1bxla_fusion_autotuner_thresh\x18\x0f\x20\x01(\x03R\x18\
    xlaFusionAutotunerThreshJ\x04\x08\x02\x10\x03\"\xa8\x06\n\nRunOptions\
    \x12B\n\x0btrace_level\x18\x01\x20\x01(\x0e2!.tensorflow.RunOptions.Trac\
    eLevelR\ntraceLevel\x12\"\n\rtimeout_in_ms\x18\x02\x20\x01(\x03R\x0btime\
    outInMs\x12/\n\x14inter_op_thread_pool\x18\x03\x20\x01(\x05R\x11interOpT\
    hreadPool\x126\n\x17output_partition_graphs\x18\x05\x20\x01(\x08R\x15out\
    putPartitionGraphs\x12=\n\rdebug_options\x18\x06\x20\x01(\x0b2\x18.tenso\
    rflow.DebugOptionsR\x0cdebugOptions\x12J\n\"report_tensor_allocations_up\
    on_oom\x18\x07\x20\x01(\x08R\x1ereportTensorAllocationsUponOom\x12G\n\
    \x0cexperimental\x18\x08\x20\x01(\x0b2#.tensorflow.RunOptions.Experiment\
    alR\x0cexperimental\x1a\x9a\x02\n\x0cExperimental\x120\n\x14collective_g\
    raph_key\x18\x01\x20\x01(\x03R\x12collectiveGraphKey\x12/\n\x14use_run_h\
    andler_pool\x18\x02\x20\x01(\x08R\x11useRunHandlerPool\x12r\n\x18run_han\
    dler_pool_options\x18\x03\x20\x01(\x0b29.tensorflow.RunOptions.Experimen\
    tal.RunHandlerPoolOptionsR\x15runHandlerPoolOptions\x1a3\n\x15RunHandler\
    PoolOptions\x12\x1a\n\x08priority\x18\x01\x20\x01(\x03R\x08priority\"R\n\
    \nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\0\x12\x12\n\x0eSOFTWARE_TRACE\x10\
    \x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\
    \x04\x08\x04\x10\x05\"\xfc\x03\n\x0bRunMetadata\x124\n\nstep_stats\x18\
    \x01\x20\x01(\x0b2\x15.tensorflow.StepStatsR\tstepStats\x127\n\ncost_gra\
    ph\x18\x02\x20\x01(\x0b2\x18.tensorflow.CostGraphDefR\tcostGraph\x12?\n\
    \x10partition_graphs\x18\x03\x20\x03(\x0b2\x14.tensorflow.GraphDefR\x0fp\
    artitionGraphs\x12O\n\x0ffunction_graphs\x18\x04\x20\x03(\x0b2&.tensorfl\
    ow.RunMetadata.FunctionGraphsR\x0efunctionGraphs\x1a\xeb\x01\n\x0eFuncti\
    onGraphs\x12?\n\x10partition_graphs\x18\x01\x20\x03(\x0b2\x14.tensorflow\
    .GraphDefR\x0fpartitionGraphs\x12J\n\x16pre_optimization_graph\x18\x02\
    \x20\x01(\x0b2\x14.tensorflow.GraphDefR\x14preOptimizationGraph\x12L\n\
    \x17post_optimization_graph\x18\x03\x20\x01(\x0b2\x14.tensorflow.GraphDe\
    fR\x15postOptimizationGraph\"P\n\x10TensorConnection\x12\x1f\n\x0bfrom_t\
    ensor\x18\x01\x20\x01(\tR\nfromTensor\x12\x1b\n\tto_tensor\x18\x02\x20\
    \x01(\tR\x08toTensor\"\xa5\x04\n\x0fCallableOptions\x12\x12\n\x04feed\
    \x18\x01\x20\x03(\tR\x04feed\x12\x14\n\x05fetch\x18\x02\x20\x03(\tR\x05f\
    etch\x12\x16\n\x06target\x18\x03\x20\x03(\tR\x06target\x127\n\x0brun_opt\
    ions\x18\x04\x20\x01(\x0b2\x16.tensorflow.RunOptionsR\nrunOptions\x12I\n\
    \x11tensor_connection\x18\x05\x20\x03(\x0b2\x1c.tensorflow.TensorConnect\
    ionR\x10tensorConnection\x12O\n\x0cfeed_devices\x18\x06\x20\x03(\x0b2,.t\
    ensorflow.CallableOptions.FeedDevicesEntryR\x0bfeedDevices\x12R\n\rfetch\
    _devices\x18\x07\x20\x03(\x0b2-.tensorflow.CallableOptions.FetchDevicesE\
    ntryR\x0cfetchDevices\x12&\n\x0ffetch_skip_sync\x18\x08\x20\x01(\x08R\rf\
    etchSkipSync\x1a>\n\x10FeedDevicesEntry\x12\x10\n\x03key\x18\x01\x20\x01\
    (\tR\x03key\x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\x028\x01\
    \x1a?\n\x11FetchDevicesEntry\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\
    \x12\x14\n\x05value\x18\x02\x20\x01(\tR\x05value:\x028\x01Bw\n\x18org.te\
    nsorflow.frameworkB\x0cConfigProtosP\x01ZHgithub.com/tensorflow/tensorfl\
    ow/tensorflow/go/core/core_protos_go_proto\xf8\x01\x01J\xe8\x96\x02\n\
    \x07\x12\x05\0\0\xa7\x06\x01\n\x08\n\x01\x0c\x12\x03\0\0\x12\n\x08\n\x01\
    \x02\x12\x03\x02\0\x13\n\t\n\x02\x03\0\x12\x03\x04\04\n\t\n\x02\x03\x01\
    \x12\x03\x05\0/\n\t\n\x02\x03\x02\x12\x03\x06\04\n\t\n\x02\x03\x03\x12\
    \x03\x07\00\n\t\n\x02\x03\x04\x12\x03\x08\0.\n\t\n\x02\x03\x05\x12\x03\t\
    \08\n\x08\n\x01\x08\x12\x03\x0b\0\x1f\n\t\n\x02\x08\x1f\x12\x03\x0b\0\
    \x1f\n\x08\n\x01\x08\x12\x03\x0c\0-\n\t\n\x02\x08\x08\x12\x03\x0c\0-\n\
    \x08\n\x01\x08\x12\x03\r\0\"\n\t\n\x02\x08\n\x12\x03\r\0\"\n\x08\n\x01\
    \x08\x12\x03\x0e\01\n\t\n\x02\x08\x01\x12\x03\x0e\01\n\x08\n\x01\x08\x12\
    \x03\x0f\0_\n\t\n\x02\x08\x0b\x12\x03\x0f\0_\n\x0b\n\x02\x04\0\x12\x05\
    \x11\0\xcd\x01\x01\n\n\n\x03\x04\0\x01\x12\x03\x11\x08\x12\n\xe2\x07\n\
    \x04\x04\0\x02\0\x12\x03#\x02-\x1a\xd4\x07\x20Fraction\x20of\x20the\x20a\
    vailable\x20GPU\x20memory\x20to\x20allocate\x20for\x20each\x20process.\n\
    \x201\x20means\x20to\x20allocate\x20all\x20of\x20the\x20GPU\x20memory,\
    \x200.5\x20means\x20the\x20process\n\x20allocates\x20up\x20to\x20~50%\
    \x20of\x20the\x20available\x20GPU\x20memory.\n\n\x20GPU\x20memory\x20is\
    \x20pre-allocated\x20unless\x20the\x20allow_growth\x20option\x20is\x20en\
    abled.\n\n\x20If\x20greater\x20than\x201.0,\x20uses\x20CUDA\x20unified\
    \x20memory\x20to\x20potentially\x20oversubscribe\n\x20the\x20amount\x20o\
    f\x20memory\x20available\x20on\x20the\x20GPU\x20device\x20by\x20using\
    \x20host\x20memory\x20as\x20a\n\x20swap\x20space.\x20Accessing\x20memory\
    \x20not\x20available\x20on\x20the\x20device\x20will\x20be\n\x20significa\
    ntly\x20slower\x20as\x20that\x20would\x20require\x20memory\x20transfer\
    \x20between\x20the\x20host\n\x20and\x20the\x20device.\x20Options\x20to\
    \x20reduce\x20the\x20memory\x20requirement\x20should\x20be\n\x20consider\
    ed\x20before\x20enabling\x20this\x20option\x20as\x20this\x20may\x20come\
    \x20with\x20a\x20negative\n\x20performance\x20impact.\x20Oversubscriptio\
    n\x20using\x20the\x20unified\x20memory\x20requires\n\x20Pascal\x20class\
    \x20or\x20newer\x20GPUs\x20and\x20it\x20is\x20currently\x20only\x20suppo\
    rted\x20on\x20the\x20Linux\n\x20operating\x20system.\x20See\n\x20https:/\
    /docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#um-requirement\
    s\n\x20for\x20the\x20detailed\x20requirements.\n\n\x0c\n\x05\x04\0\x02\0\
    \x05\x12\x03#\x02\x08\n\x0c\n\x05\x04\0\x02\0\x01\x12\x03#\t(\n\x0c\n\
    \x05\x04\0\x02\0\x03\x12\x03#+,\n\x93\x01\n\x04\x04\0\x02\x01\x12\x03'\
    \x02\x18\x1a\x85\x01\x20If\x20true,\x20the\x20allocator\x20does\x20not\
    \x20pre-allocate\x20the\x20entire\x20specified\n\x20GPU\x20memory\x20reg\
    ion,\x20instead\x20starting\x20small\x20and\x20growing\x20as\x20needed.\
    \n\n\x0c\n\x05\x04\0\x02\x01\x05\x12\x03'\x02\x06\n\x0c\n\x05\x04\0\x02\
    \x01\x01\x12\x03'\x07\x13\n\x0c\n\x05\x04\0\x02\x01\x03\x12\x03'\x16\x17\
    \n\x8b\x02\n\x04\x04\0\x02\x02\x12\x031\x02\x1c\x1a\xfd\x01\x20The\x20ty\
    pe\x20of\x20GPU\x20allocation\x20strategy\x20to\x20use.\n\n\x20Allowed\
    \x20values:\n\x20\"\":\x20The\x20empty\x20string\x20(default)\x20uses\
    \x20a\x20system-chosen\x20default\n\x20\x20\x20\x20\x20which\x20may\x20c\
    hange\x20over\x20time.\n\n\x20\"BFC\":\x20A\x20\"Best-fit\x20with\x20coa\
    lescing\"\x20algorithm,\x20simplified\x20from\x20a\n\x20\x20\x20\x20\x20\
    \x20\x20\x20version\x20of\x20dlmalloc.\n\n\x0c\n\x05\x04\0\x02\x02\x05\
    \x12\x031\x02\x08\n\x0c\n\x05\x04\0\x02\x02\x01\x12\x031\t\x17\n\x0c\n\
    \x05\x04\0\x02\x02\x03\x12\x031\x1a\x1b\n\xb2\x01\n\x04\x04\0\x02\x03\
    \x12\x036\x02$\x1a\xa4\x01\x20Delay\x20deletion\x20of\x20up\x20to\x20thi\
    s\x20many\x20bytes\x20to\x20reduce\x20the\x20number\x20of\n\x20interacti\
    ons\x20with\x20gpu\x20driver\x20code.\x20\x20If\x200,\x20the\x20system\
    \x20chooses\n\x20a\x20reasonable\x20default\x20(several\x20MBs).\n\n\x0c\
    \n\x05\x04\0\x02\x03\x05\x12\x036\x02\x07\n\x0c\n\x05\x04\0\x02\x03\x01\
    \x12\x036\x08\x1f\n\x0c\n\x05\x04\0\x02\x03\x03\x12\x036\"#\n\xdc\t\n\
    \x04\x04\0\x02\x04\x12\x03M\x02!\x1a\xce\t\x20A\x20comma-separated\x20li\
    st\x20of\x20GPU\x20ids\x20that\x20determines\x20the\x20'visible'\n\x20to\
    \x20'virtual'\x20mapping\x20of\x20GPU\x20devices.\x20\x20For\x20example,\
    \x20if\x20TensorFlow\n\x20can\x20see\x208\x20GPU\x20devices\x20in\x20the\
    \x20process,\x20and\x20one\x20wanted\x20to\x20map\n\x20visible\x20GPU\
    \x20devices\x205\x20and\x203\x20as\x20\"/device:GPU:0\",\x20and\x20\"/de\
    vice:GPU:1\",\n\x20then\x20one\x20would\x20specify\x20this\x20field\x20a\
    s\x20\"5,3\".\x20\x20This\x20field\x20is\x20similar\x20in\n\x20spirit\
    \x20to\x20the\x20CUDA_VISIBLE_DEVICES\x20environment\x20variable,\x20exc\
    ept\n\x20it\x20applies\x20to\x20the\x20visible\x20GPU\x20devices\x20in\
    \x20the\x20process.\n\n\x20NOTE:\n\x201.\x20The\x20GPU\x20driver\x20prov\
    ides\x20the\x20process\x20with\x20the\x20visible\x20GPUs\n\x20\x20\x20\
    \x20in\x20an\x20order\x20which\x20is\x20not\x20guaranteed\x20to\x20have\
    \x20any\x20correlation\x20to\n\x20\x20\x20\x20the\x20*physical*\x20GPU\
    \x20id\x20in\x20the\x20machine.\x20\x20This\x20field\x20is\x20used\x20fo\
    r\n\x20\x20\x20\x20remapping\x20\"visible\"\x20to\x20\"virtual\",\x20whi\
    ch\x20means\x20this\x20operates\x20only\n\x20\x20\x20\x20after\x20the\
    \x20process\x20starts.\x20\x20Users\x20are\x20required\x20to\x20use\x20v\
    endor\n\x20\x20\x20\x20specific\x20mechanisms\x20(e.g.,\x20CUDA_VISIBLE_\
    DEVICES)\x20to\x20control\x20the\n\x20\x20\x20\x20physical\x20to\x20visi\
    ble\x20device\x20mapping\x20prior\x20to\x20invoking\x20TensorFlow.\n\x20\
    2.\x20In\x20the\x20code,\x20the\x20ids\x20in\x20this\x20list\x20are\x20a\
    lso\x20called\x20\"platform\x20GPU\x20id\"s,\n\x20\x20\x20\x20and\x20the\
    \x20'virtual'\x20ids\x20of\x20GPU\x20devices\x20(i.e.\x20the\x20ids\x20i\
    n\x20the\x20device\n\x20\x20\x20\x20name\x20\"/device:GPU:<id>\")\x20are\
    \x20also\x20called\x20\"TF\x20GPU\x20id\"s.\x20Please\n\x20\x20\x20\x20r\
    efer\x20to\x20third_party/tensorflow/core/common_runtime/gpu/gpu_id.h\n\
    \x20\x20\x20\x20for\x20more\x20information.\n\n\x0c\n\x05\x04\0\x02\x04\
    \x05\x12\x03M\x02\x08\n\x0c\n\x05\x04\0\x02\x04\x01\x12\x03M\t\x1c\n\x0c\
    \n\x05\x04\0\x02\x04\x03\x12\x03M\x1f\x20\n\xc1\x01\n\x04\x04\0\x02\x05\
    \x12\x03R\x02'\x1a\xb3\x01\x20In\x20the\x20event\x20polling\x20loop\x20s\
    leep\x20this\x20many\x20microseconds\x20between\n\x20PollEvents\x20calls\
    ,\x20when\x20the\x20queue\x20is\x20not\x20empty.\x20\x20If\x20value\x20i\
    s\x20not\n\x20set\x20or\x20set\x20to\x200,\x20gets\x20set\x20to\x20a\x20\
    non-zero\x20default.\n\n\x0c\n\x05\x04\0\x02\x05\x05\x12\x03R\x02\x07\n\
    \x0c\n\x05\x04\0\x02\x05\x01\x12\x03R\x08\"\n\x0c\n\x05\x04\0\x02\x05\
    \x03\x12\x03R%&\n4\n\x04\x04\0\x02\x06\x12\x03U\x02)\x1a'\x20This\x20fie\
    ld\x20is\x20deprecated\x20and\x20ignored.\n\n\x0c\n\x05\x04\0\x02\x06\
    \x05\x12\x03U\x02\x07\n\x0c\n\x05\x04\0\x02\x06\x01\x12\x03U\x08$\n\x0c\
    \n\x05\x04\0\x02\x06\x03\x12\x03U'(\n\x9a\x05\n\x04\x04\0\x02\x07\x12\
    \x03a\x02\x20\x1a\x8c\x05\x20Force\x20all\x20tensors\x20to\x20be\x20gpu_\
    compatible.\x20On\x20a\x20GPU-enabled\x20TensorFlow,\n\x20enabling\x20th\
    is\x20option\x20forces\x20all\x20CPU\x20tensors\x20to\x20be\x20allocated\
    \x20with\x20Cuda\n\x20pinned\x20memory.\x20Normally,\x20TensorFlow\x20wi\
    ll\x20infer\x20which\x20tensors\x20should\x20be\n\x20allocated\x20as\x20\
    the\x20pinned\x20memory.\x20But\x20in\x20case\x20where\x20the\x20inferen\
    ce\x20is\n\x20incomplete,\x20this\x20option\x20can\x20significantly\x20s\
    peed\x20up\x20the\x20cross-device\x20memory\n\x20copy\x20performance\x20\
    as\x20long\x20as\x20it\x20fits\x20the\x20memory.\n\x20Note\x20that\x20th\
    is\x20option\x20is\x20not\x20something\x20that\x20should\x20be\n\x20enab\
    led\x20by\x20default\x20for\x20unknown\x20or\x20very\x20large\x20models,\
    \x20since\x20all\x20Cuda\x20pinned\n\x20memory\x20is\x20unpageable,\x20h\
    aving\x20too\x20much\x20pinned\x20memory\x20might\x20negatively\x20impac\
    t\n\x20the\x20overall\x20host\x20system\x20performance.\n\n\x0c\n\x05\
    \x04\0\x02\x07\x05\x12\x03a\x02\x06\n\x0c\n\x05\x04\0\x02\x07\x01\x12\
    \x03a\x07\x1b\n\x0c\n\x05\x04\0\x02\x07\x03\x12\x03a\x1e\x1f\n\r\n\x04\
    \x04\0\x03\0\x12\x05c\x02\xc7\x01\x03\n\x0c\n\x05\x04\0\x03\0\x01\x12\
    \x03c\n\x16\na\n\x06\x04\0\x03\0\x03\0\x12\x04f\x04|\x05\x1aQ\x20Configu\
    ration\x20for\x20breaking\x20down\x20a\x20visible\x20GPU\x20into\x20mult\
    iple\x20\"virtual\"\n\x20devices.\n\n\x0e\n\x07\x04\0\x03\0\x03\0\x01\
    \x12\x03f\x0c\x1a\n\xa5\x03\n\x08\x04\0\x03\0\x03\0\x02\0\x12\x03o\x06)\
    \x1a\x93\x03\x20Per\x20\"virtual\"\x20device\x20memory\x20limit,\x20in\
    \x20MB.\x20The\x20number\x20of\x20elements\x20in\n\x20the\x20list\x20is\
    \x20the\x20number\x20of\x20virtual\x20devices\x20to\x20create\x20on\x20t\
    he\n\x20corresponding\x20visible\x20GPU\x20(see\x20\"virtual_devices\"\
    \x20below).\n\x20If\x20empty,\x20it\x20will\x20create\x20single\x20virtu\
    al\x20device\x20taking\x20all\x20available\n\x20memory\x20from\x20the\
    \x20device.\n\n\x20For\x20the\x20concept\x20of\x20\"visible\"\x20and\x20\
    \"virtual\"\x20GPU,\x20see\x20the\x20comments\x20for\n\x20\"visible_devi\
    ce_list\"\x20above\x20for\x20more\x20information.\n\n\x10\n\t\x04\0\x03\
    \0\x03\0\x02\0\x04\x12\x03o\x06\x0e\n\x10\n\t\x04\0\x03\0\x03\0\x02\0\
    \x05\x12\x03o\x0f\x14\n\x10\n\t\x04\0\x03\0\x03\0\x02\0\x01\x12\x03o\x15\
    $\n\x10\n\t\x04\0\x03\0\x03\0\x02\0\x03\x12\x03o'(\n\xe1\x03\n\x08\x04\0\
    \x03\0\x03\0\x02\x01\x12\x03{\x06\"\x1a\xcf\x03\x20Priority\x20values\
    \x20to\x20use\x20with\x20the\x20virtual\x20devices.\x20Use\x20the\x20cud\
    a\x20function\n\x20cudaDeviceGetStreamPriorityRange\x20to\x20query\x20fo\
    r\x20valid\x20range\x20of\x20values\x20for\n\x20priority.\n\n\x20On\x20a\
    \x20P4000\x20GPU\x20with\x20cuda\x2010.1,\x20the\x20priority\x20range\
    \x20reported\x20was\x200\x20for\n\x20least\x20priority\x20and\x20-1\x20f\
    or\x20greatest\x20priority.\n\n\x20If\x20this\x20field\x20is\x20not\x20s\
    pecified,\x20then\x20the\x20virtual\x20devices\x20will\x20be\n\x20create\
    d\x20with\x20the\x20default.\x20If\x20this\x20field\x20has\x20values\x20\
    set,\x20then\x20the\x20size\n\x20of\x20this\x20must\x20match\x20with\x20\
    the\x20above\x20memory_limit_mb.\n\n\x10\n\t\x04\0\x03\0\x03\0\x02\x01\
    \x04\x12\x03{\x06\x0e\n\x10\n\t\x04\0\x03\0\x03\0\x02\x01\x05\x12\x03{\
    \x0f\x14\n\x10\n\t\x04\0\x03\0\x03\0\x02\x01\x01\x12\x03{\x15\x1d\n\x10\
    \n\t\x04\0\x03\0\x03\0\x02\x01\x03\x12\x03{\x20!\n\xae\t\n\x06\x04\0\x03\
    \0\x02\0\x12\x04\x95\x01\x040\x1a\x9d\t\x20The\x20multi\x20virtual\x20de\
    vice\x20settings.\x20If\x20empty\x20(not\x20set),\x20it\x20will\x20creat\
    e\n\x20single\x20virtual\x20device\x20on\x20each\x20visible\x20GPU,\x20a\
    ccording\x20to\x20the\x20settings\n\x20in\x20\"visible_device_list\"\x20\
    above.\x20Otherwise,\x20the\x20number\x20of\x20elements\x20in\x20the\n\
    \x20list\x20must\x20be\x20the\x20same\x20as\x20the\x20number\x20of\x20vi\
    sible\x20GPUs\x20(after\n\x20\"visible_device_list\"\x20filtering\x20if\
    \x20it\x20is\x20set),\x20and\x20the\x20string\x20represented\n\x20device\
    \x20names\x20(e.g.\x20/device:GPU:<id>)\x20will\x20refer\x20to\x20the\
    \x20virtual\n\x20devices\x20and\x20have\x20the\x20<id>\x20field\x20assig\
    ned\x20sequentially\x20starting\x20from\x200,\n\x20according\x20to\x20th\
    e\x20order\x20they\x20appear\x20in\x20this\x20list\x20and\x20the\x20\"me\
    mory_limit\"\n\x20list\x20inside\x20each\x20element.\x20For\x20example,\
    \n\x20\x20\x20visible_device_list\x20=\x20\"1,0\"\n\x20\x20\x20virtual_d\
    evices\x20{\x20memory_limit:\x201GB\x20memory_limit:\x202GB\x20}\n\x20\
    \x20\x20virtual_devices\x20{}\n\x20will\x20create\x20three\x20virtual\
    \x20devices\x20as:\n\x20\x20\x20/device:GPU:0\x20->\x20visible\x20GPU\
    \x201\x20with\x201GB\x20memory\n\x20\x20\x20/device:GPU:1\x20->\x20visib\
    le\x20GPU\x201\x20with\x202GB\x20memory\n\x20\x20\x20/device:GPU:2\x20->\
    \x20visible\x20GPU\x200\x20with\x20all\x20available\x20memory\n\n\x20NOT\
    E:\n\x201.\x20It's\x20invalid\x20to\x20set\x20both\x20this\x20and\x20\"p\
    er_process_gpu_memory_fraction\"\n\x20\x20\x20\x20at\x20the\x20same\x20t\
    ime.\n\x202.\x20Currently\x20this\x20setting\x20is\x20per-process,\x20no\
    t\x20per-session.\x20Using\n\x20\x20\x20\x20different\x20settings\x20in\
    \x20different\x20sessions\x20within\x20same\x20process\x20will\n\x20\x20\
    \x20\x20result\x20in\x20undefined\x20behavior.\n\n\x0f\n\x07\x04\0\x03\0\
    \x02\0\x04\x12\x04\x95\x01\x04\x0c\n\x0f\n\x07\x04\0\x03\0\x02\0\x06\x12\
    \x04\x95\x01\r\x1b\n\x0f\n\x07\x04\0\x03\0\x02\0\x01\x12\x04\x95\x01\x1c\
    +\n\x0f\n\x07\x04\0\x03\0\x02\0\x03\x12\x04\x95\x01./\n\xe6\x03\n\x06\
    \x04\0\x03\0\x02\x01\x12\x04\x9e\x01\x04\x20\x1a\xd5\x03\x20If\x20true,\
    \x20uses\x20CUDA\x20unified\x20memory\x20for\x20memory\x20allocations.\
    \x20If\n\x20per_process_gpu_memory_fraction\x20option\x20is\x20greater\
    \x20than\x201.0,\x20then\x20unified\n\x20memory\x20is\x20used\x20regardl\
    ess\x20of\x20the\x20value\x20for\x20this\x20field.\x20See\x20comments\
    \x20for\n\x20per_process_gpu_memory_fraction\x20field\x20for\x20more\x20\
    details\x20and\x20requirements\n\x20of\x20the\x20unified\x20memory.\x20T\
    his\x20option\x20is\x20useful\x20to\x20oversubscribe\x20memory\x20if\n\
    \x20multiple\x20processes\x20are\x20sharing\x20a\x20single\x20GPU\x20whi\
    le\x20individually\x20using\x20less\n\x20than\x201.0\x20per\x20process\
    \x20memory\x20fraction.\n\n\x0f\n\x07\x04\0\x03\0\x02\x01\x05\x12\x04\
    \x9e\x01\x04\x08\n\x0f\n\x07\x04\0\x03\0\x02\x01\x01\x12\x04\x9e\x01\t\
    \x1b\n\x0f\n\x07\x04\0\x03\0\x02\x01\x03\x12\x04\x9e\x01\x1e\x1f\n\xa2\
    \x01\n\x06\x04\0\x03\0\x02\x02\x12\x04\xa3\x01\x04*\x1a\x91\x01\x20If\
    \x20>\x201,\x20the\x20number\x20of\x20device-to-device\x20copy\x20stream\
    s\x20to\x20create\n\x20for\x20each\x20GPUDevice.\x20\x20Default\x20value\
    \x20is\x200,\x20which\x20is\x20automatically\n\x20converted\x20to\x201.\
    \n\n\x0f\n\x07\x04\0\x03\0\x02\x02\x05\x12\x04\xa3\x01\x04\t\n\x0f\n\x07\
    \x04\0\x03\0\x02\x02\x01\x12\x04\xa3\x01\n%\n\x0f\n\x07\x04\0\x03\0\x02\
    \x02\x03\x12\x04\xa3\x01()\n\xb4\x03\n\x06\x04\0\x03\0\x02\x03\x12\x04\
    \xab\x01\x04%\x1a\xa3\x03\x20If\x20non-empty,\x20defines\x20a\x20good\
    \x20GPU\x20ring\x20order\x20on\x20a\x20single\x20worker\x20based\x20on\n\
    \x20device\x20interconnect.\x20\x20This\x20assumes\x20that\x20all\x20wor\
    kers\x20have\x20the\x20same\x20GPU\n\x20topology.\x20\x20Specify\x20as\
    \x20a\x20comma-separated\x20string,\x20e.g.\x20\"3,2,1,0,7,6,5,4\".\n\
    \x20This\x20ring\x20order\x20is\x20used\x20by\x20the\x20RingReducer\x20i\
    mplementation\x20of\n\x20CollectiveReduce,\x20and\x20serves\x20as\x20an\
    \x20override\x20to\x20automatic\x20ring\x20order\n\x20generation\x20in\
    \x20OrderTaskDeviceMap()\x20during\x20CollectiveParam\x20resolution.\n\n\
    \x0f\n\x07\x04\0\x03\0\x02\x03\x05\x12\x04\xab\x01\x04\n\n\x0f\n\x07\x04\
    \0\x03\0\x02\x03\x01\x12\x04\xab\x01\x0b\x20\n\x0f\n\x07\x04\0\x03\0\x02\
    \x03\x03\x12\x04\xab\x01#$\n\x80\x02\n\x06\x04\0\x03\0\x02\x04\x12\x04\
    \xb1\x01\x04#\x1a\xef\x01\x20If\x20true\x20then\x20extra\x20work\x20is\
    \x20done\x20by\x20GPUDevice\x20and\x20GPUBFCAllocator\x20to\n\x20keep\
    \x20track\x20of\x20when\x20GPU\x20memory\x20is\x20freed\x20and\x20when\
    \x20kernels\x20actually\n\x20complete\x20so\x20that\x20we\x20can\x20know\
    \x20when\x20a\x20nominally\x20free\x20memory\x20chunk\n\x20is\x20really\
    \x20not\x20subject\x20to\x20pending\x20use.\n\n\x0f\n\x07\x04\0\x03\0\
    \x02\x04\x05\x12\x04\xb1\x01\x04\x08\n\x0f\n\x07\x04\0\x03\0\x02\x04\x01\
    \x12\x04\xb1\x01\t\x1e\n\x0f\n\x07\x04\0\x03\0\x02\x04\x03\x12\x04\xb1\
    \x01!\"\n\xb5\x02\n\x06\x04\0\x03\0\x02\x05\x12\x04\xbb\x01\x04*\x1a\x92\
    \x02\x20Parameters\x20for\x20GPUKernelTracker.\x20\x20By\x20default\x20n\
    o\x20kernel\x20tracking\x20is\x20done.\n\x20Note\x20that\x20timestamped_\
    allocator\x20is\x20only\x20effective\x20if\x20some\x20tracking\x20is\n\
    \x20specified.\n\n\x20If\x20kernel_tracker_max_interval\x20=\x20n\x20>\
    \x200,\x20then\x20a\x20tracking\x20event\n\x20is\x20inserted\x20after\
    \x20every\x20n\x20kernels\x20without\x20an\x20event.\n2\x10\x20reserved\
    \x20id:\x206\n\n\x0f\n\x07\x04\0\x03\0\x02\x05\x05\x12\x04\xbb\x01\x04\t\
    \n\x0f\n\x07\x04\0\x03\0\x02\x05\x01\x12\x04\xbb\x01\n%\n\x0f\n\x07\x04\
    \0\x03\0\x02\x05\x03\x12\x04\xbb\x01()\n\x9e\x02\n\x06\x04\0\x03\0\x02\
    \x06\x12\x04\xc1\x01\x04'\x1a\x8d\x02\x20If\x20kernel_tracker_max_bytes\
    \x20=\x20n\x20>\x200,\x20then\x20a\x20tracking\x20event\x20is\n\x20inser\
    ted\x20after\x20every\x20series\x20of\x20kernels\x20allocating\x20a\x20s\
    um\x20of\n\x20memory\x20>=\x20n.\x20\x20If\x20one\x20kernel\x20allocates\
    \x20b\x20*\x20n\x20bytes,\x20then\x20one\n\x20event\x20will\x20be\x20ins\
    erted\x20after\x20it,\x20but\x20it\x20will\x20count\x20as\x20b\x20agains\
    t\n\x20the\x20pending\x20limit.\n\n\x0f\n\x07\x04\0\x03\0\x02\x06\x05\
    \x12\x04\xc1\x01\x04\t\n\x0f\n\x07\x04\0\x03\0\x02\x06\x01\x12\x04\xc1\
    \x01\n\"\n\x0f\n\x07\x04\0\x03\0\x02\x06\x03\x12\x04\xc1\x01%&\n\xd1\x01\
    \n\x06\x04\0\x03\0\x02\x07\x12\x04\xc6\x01\x04)\x1a\xc0\x01\x20If\x20ker\
    nel_tracker_max_pending\x20>\x200\x20then\x20no\x20more\x20than\x20this\
    \x20many\n\x20tracking\x20events\x20can\x20be\x20outstanding\x20at\x20a\
    \x20time.\x20\x20An\x20attempt\x20to\n\x20launch\x20an\x20additional\x20\
    kernel\x20will\x20stall\x20until\x20an\x20event\n\x20completes.\n\n\x0f\
    \n\x07\x04\0\x03\0\x02\x07\x05\x12\x04\xc6\x01\x04\t\n\x0f\n\x07\x04\0\
    \x03\0\x02\x07\x01\x12\x04\xc6\x01\n$\n\x0f\n\x07\x04\0\x03\0\x02\x07\
    \x03\x12\x04\xc6\x01'(\n\xa9\x01\n\x04\x04\0\x02\x08\x12\x04\xcc\x01\x02\
    \x20\x1a\x9a\x01\x20Everything\x20inside\x20experimental\x20is\x20subjec\
    t\x20to\x20change\x20and\x20is\x20not\x20subject\n\x20to\x20API\x20stabi\
    lity\x20guarantees\x20in\n\x20https://www.tensorflow.org/guide/version_c\
    ompat.\n\n\r\n\x05\x04\0\x02\x08\x06\x12\x04\xcc\x01\x02\x0e\n\r\n\x05\
    \x04\0\x02\x08\x01\x12\x04\xcc\x01\x0f\x1b\n\r\n\x05\x04\0\x02\x08\x03\
    \x12\x04\xcc\x01\x1e\x1f\n5\n\x02\x04\x01\x12\x06\xd0\x01\0\xfd\x01\x01\
    \x1a'\x20Options\x20passed\x20to\x20the\x20graph\x20optimizer\n\n\x0b\n\
    \x03\x04\x01\x01\x12\x04\xd0\x01\x08\x18\nS\n\x04\x04\x01\x02\0\x12\x04\
    \xd2\x01\x02/\x1aE\x20If\x20true,\x20optimize\x20the\x20graph\x20using\
    \x20common\x20subexpression\x20elimination.\n\n\r\n\x05\x04\x01\x02\0\
    \x05\x12\x04\xd2\x01\x02\x06\n\r\n\x05\x04\x01\x02\0\x01\x12\x04\xd2\x01\
    \x07*\n\r\n\x05\x04\x01\x02\0\x03\x12\x04\xd2\x01-.\nL\n\x04\x04\x01\x02\
    \x01\x12\x04\xd5\x01\x02\x1f\x1a>\x20If\x20true,\x20perform\x20constant\
    \x20folding\x20optimization\x20on\x20the\x20graph.\n\n\r\n\x05\x04\x01\
    \x02\x01\x05\x12\x04\xd5\x01\x02\x06\n\r\n\x05\x04\x01\x02\x01\x01\x12\
    \x04\xd5\x01\x07\x1a\n\r\n\x05\x04\x01\x02\x01\x03\x12\x04\xd5\x01\x1d\
    \x1e\n\xdc\x02\n\x04\x04\x01\x02\x02\x12\x04\xdc\x01\x02)\x1a\xcd\x02\
    \x20Constant\x20folding\x20optimization\x20replaces\x20tensors\x20whose\
    \x20values\x20can\x20be\n\x20predetermined,\x20with\x20constant\x20nodes\
    .\x20To\x20avoid\x20inserting\x20too\x20large\x20constants,\n\x20the\x20\
    size\x20of\x20each\x20constant\x20created\x20can\x20be\x20limited.\x20If\
    \x20this\x20value\x20is\x20zero,\x20a\n\x20default\x20limit\x20of\x2010\
    \x20MiB\x20will\x20be\x20applied.\x20If\x20constant\x20folding\x20optimi\
    zation\n\x20is\x20disabled,\x20this\x20value\x20is\x20ignored.\n\n\r\n\
    \x05\x04\x01\x02\x02\x05\x12\x04\xdc\x01\x02\x07\n\r\n\x05\x04\x01\x02\
    \x02\x01\x12\x04\xdc\x01\x08$\n\r\n\x05\x04\x01\x02\x02\x03\x12\x04\xdc\
    \x01'(\n@\n\x04\x04\x01\x02\x03\x12\x04\xdf\x01\x02\x20\x1a2\x20If\x20tr\
    ue,\x20perform\x20function\x20inlining\x20on\x20the\x20graph.\n\n\r\n\
    \x05\x04\x01\x02\x03\x05\x12\x04\xdf\x01\x02\x06\n\r\n\x05\x04\x01\x02\
    \x03\x01\x12\x04\xdf\x01\x07\x1b\n\r\n\x05\x04\x01\x02\x03\x03\x12\x04\
    \xdf\x01\x1e\x1f\n$\n\x04\x04\x01\x04\0\x12\x06\xe2\x01\x02\xeb\x01\x03\
    \x1a\x14\x20Optimization\x20level\n\n\r\n\x05\x04\x01\x04\0\x01\x12\x04\
    \xe2\x01\x07\x0c\n\x84\x01\n\x06\x04\x01\x04\0\x02\0\x12\x04\xe7\x01\x04\
    \x0b\x1at\x20L1\x20is\x20the\x20default\x20level.\n\x20Optimization\x20p\
    erformed\x20at\x20L1\x20:\n\x201.\x20Common\x20subexpression\x20eliminat\
    ion\n\x202.\x20Constant\x20folding\n\n\x0f\n\x07\x04\x01\x04\0\x02\0\x01\
    \x12\x04\xe7\x01\x04\x06\n\x0f\n\x07\x04\x01\x04\0\x02\0\x02\x12\x04\xe7\
    \x01\t\n\n\"\n\x06\x04\x01\x04\0\x02\x01\x12\x04\xea\x01\x04\x0c\x1a\x12\
    \x20No\x20optimizations\n\n\x0f\n\x07\x04\x01\x04\0\x02\x01\x01\x12\x04\
    \xea\x01\x04\x06\n\x0f\n\x07\x04\x01\x04\0\x02\x01\x02\x12\x04\xea\x01\t\
    \x0b\n\xa5\x01\n\x04\x04\x01\x02\x04\x12\x04\xef\x01\x02\x16\x1a\x96\x01\
    \x20Overall\x20optimization\x20level.\x20The\x20actual\x20optimizations\
    \x20applied\x20will\x20be\x20the\n\x20logical\x20OR\x20of\x20the\x20flag\
    s\x20that\x20this\x20level\x20implies\x20and\x20any\x20flags\x20already\
    \x20set.\n\n\r\n\x05\x04\x01\x02\x04\x06\x12\x04\xef\x01\x02\x07\n\r\n\
    \x05\x04\x01\x02\x04\x01\x12\x04\xef\x01\x08\x11\n\r\n\x05\x04\x01\x02\
    \x04\x03\x12\x04\xef\x01\x14\x15\nE\n\x04\x04\x01\x04\x01\x12\x06\xf2\
    \x01\x02\xfb\x01\x03\x1a5\x20Control\x20the\x20use\x20of\x20the\x20compi\
    ler/jit.\x20\x20Experimental.\n\n\r\n\x05\x04\x01\x04\x01\x01\x12\x04\
    \xf2\x01\x07\x15\nL\n\x06\x04\x01\x04\x01\x02\0\x12\x04\xf3\x01\x04\x10\
    \"<\x20Default\x20setting\x20(\"off\"\x20now,\x20but\x20later\x20expecte\
    d\x20to\x20be\x20\"on\")\n\n\x0f\n\x07\x04\x01\x04\x01\x02\0\x01\x12\x04\
    \xf3\x01\x04\x0b\n\x0f\n\x07\x04\x01\x04\x01\x02\0\x02\x12\x04\xf3\x01\
    \x0e\x0f\n\x0e\n\x06\x04\x01\x04\x01\x02\x01\x12\x04\xf4\x01\x04\r\n\x0f\
    \n\x07\x04\x01\x04\x01\x02\x01\x01\x12\x04\xf4\x01\x04\x07\n\x0f\n\x07\
    \x04\x01\x04\x01\x02\x01\x02\x12\x04\xf4\x01\n\x0c\n\x84\x02\n\x06\x04\
    \x01\x04\x01\x02\x02\x12\x04\xf9\x01\x04\r\x1a\xf3\x01\x20The\x20followi\
    ng\x20settings\x20turn\x20on\x20compilation,\x20with\x20higher\x20values\
    \x20being\n\x20more\x20aggressive.\x20\x20Higher\x20values\x20may\x20red\
    uce\x20opportunities\x20for\x20parallelism\n\x20and\x20may\x20use\x20mor\
    e\x20memory.\x20\x20(At\x20present,\x20there\x20is\x20no\x20distinction,\
    \x20but\x20this\n\x20is\x20expected\x20to\x20change.)\n\n\x0f\n\x07\x04\
    \x01\x04\x01\x02\x02\x01\x12\x04\xf9\x01\x04\x08\n\x0f\n\x07\x04\x01\x04\
    \x01\x02\x02\x02\x12\x04\xf9\x01\x0b\x0c\n\x0e\n\x06\x04\x01\x04\x01\x02\
    \x03\x12\x04\xfa\x01\x04\r\n\x0f\n\x07\x04\x01\x04\x01\x02\x03\x01\x12\
    \x04\xfa\x01\x04\x08\n\x0f\n\x07\x04\x01\x04\x01\x02\x03\x02\x12\x04\xfa\
    \x01\x0b\x0c\n\x0c\n\x04\x04\x01\x02\x05\x12\x04\xfc\x01\x02&\n\r\n\x05\
    \x04\x01\x02\x05\x06\x12\x04\xfc\x01\x02\x10\n\r\n\x05\x04\x01\x02\x05\
    \x01\x12\x04\xfc\x01\x11!\n\r\n\x05\x04\x01\x02\x05\x03\x12\x04\xfc\x01$\
    %\n\x0c\n\x02\x04\x02\x12\x06\xff\x01\0\xac\x02\x01\n\x0b\n\x03\x04\x02\
    \x01\x12\x04\xff\x01\x08\x14\n4\n\x03\x04\x02\n\x12\x04\x81\x02\x023\x1a\
    '\x20Removed,\x20use\x20optimizer_options\x20below.\n\n\x0c\n\x04\x04\
    \x02\n\0\x12\x04\x81\x02\x0b2\n\x0b\n\x03\x04\x02\t\x12\x04\x82\x02\x02\
    \r\n\x0c\n\x04\x04\x02\t\0\x12\x04\x82\x02\x0b\x0c\n\r\n\x05\x04\x02\t\0\
    \x01\x12\x04\x82\x02\x0b\x0c\n\r\n\x05\x04\x02\t\0\x02\x12\x04\x82\x02\
    \x0b\x0c\ni\n\x04\x04\x02\x02\0\x12\x04\x86\x02\x02\"\x1a[\x20If\x20true\
    ,\x20use\x20control\x20flow\x20to\x20schedule\x20the\x20activation\x20of\
    \x20Recv\x20nodes.\n\x20(Currently\x20ignored.)\n\n\r\n\x05\x04\x02\x02\
    \0\x05\x12\x04\x86\x02\x02\x06\n\r\n\x05\x04\x02\x02\0\x01\x12\x04\x86\
    \x02\x07\x1d\n\r\n\x05\x04\x02\x02\0\x03\x12\x04\x86\x02\x20!\n;\n\x04\
    \x04\x02\x02\x01\x12\x04\x89\x02\x02)\x1a-\x20Options\x20controlling\x20\
    how\x20graph\x20is\x20optimized.\n\n\r\n\x05\x04\x02\x02\x01\x06\x12\x04\
    \x89\x02\x02\x12\n\r\n\x05\x04\x02\x02\x01\x01\x12\x04\x89\x02\x13$\n\r\
    \n\x05\x04\x02\x02\x01\x03\x12\x04\x89\x02'(\n\xa8\x01\n\x04\x04\x02\x02\
    \x02\x12\x04\x8e\x02\x02\x1d\x1a\x99\x01\x20The\x20number\x20of\x20steps\
    \x20to\x20run\x20before\x20returning\x20a\x20cost\x20model\x20detailing\
    \n\x20the\x20memory\x20usage\x20and\x20performance\x20of\x20each\x20node\
    \x20of\x20the\x20graph.\x200\x20means\n\x20no\x20cost\x20model.\n\n\r\n\
    \x05\x04\x02\x02\x02\x05\x12\x04\x8e\x02\x02\x07\n\r\n\x05\x04\x02\x02\
    \x02\x01\x12\x04\x8e\x02\x08\x18\n\r\n\x05\x04\x02\x02\x02\x03\x12\x04\
    \x8e\x02\x1b\x1c\n]\n\x04\x04\x02\x02\x03\x12\x04\x92\x02\x02#\x1aO\x20T\
    he\x20number\x20of\x20steps\x20to\x20skip\x20before\x20collecting\x20sta\
    tistics\x20for\x20the\n\x20cost\x20model.\n\n\r\n\x05\x04\x02\x02\x03\
    \x05\x12\x04\x92\x02\x02\x07\n\r\n\x05\x04\x02\x02\x03\x01\x12\x04\x92\
    \x02\x08\x1e\n\r\n\x05\x04\x02\x02\x03\x03\x12\x04\x92\x02!\"\nk\n\x04\
    \x04\x02\x02\x04\x12\x04\x96\x02\x02\x18\x1a]\x20Annotate\x20each\x20Nod\
    e\x20with\x20Op\x20output\x20shape\x20data,\x20to\x20the\x20extent\x20it\
    \x20can\n\x20be\x20statically\x20inferred.\n\n\r\n\x05\x04\x02\x02\x04\
    \x05\x12\x04\x96\x02\x02\x06\n\r\n\x05\x04\x02\x02\x04\x01\x12\x04\x96\
    \x02\x07\x13\n\r\n\x05\x04\x02\x02\x04\x03\x12\x04\x96\x02\x16\x17\n\xee\
    \x02\n\x04\x04\x02\x02\x05\x12\x04\x9f\x02\x02\x1e\x1a\xdf\x02\x20Only\
    \x20place\x20the\x20subgraphs\x20that\x20are\x20run,\x20rather\x20than\
    \x20the\x20entire\x20graph.\n\n\x20This\x20is\x20useful\x20for\x20intera\
    ctive\x20graph\x20building,\x20where\x20one\x20might\n\x20produce\x20gra\
    phs\x20that\x20cannot\x20be\x20placed\x20during\x20the\x20debugging\n\
    \x20process.\x20\x20In\x20particular,\x20it\x20allows\x20the\x20client\
    \x20to\x20continue\x20work\x20in\n\x20a\x20session\x20after\x20adding\
    \x20a\x20node\x20to\x20a\x20graph\x20whose\x20placement\n\x20constraints\
    \x20are\x20unsatisfiable.\n\n\r\n\x05\x04\x02\x02\x05\x05\x12\x04\x9f\
    \x02\x02\x06\n\r\n\x05\x04\x02\x02\x05\x01\x12\x04\x9f\x02\x07\x19\n\r\n\
    \x05\x04\x02\x02\x05\x03\x12\x04\x9f\x02\x1c\x1d\nM\n\x04\x04\x02\x02\
    \x06\x12\x04\xa2\x02\x02$\x1a?\x20If\x20true,\x20transfer\x20float\x20va\
    lues\x20between\x20processes\x20as\x20bfloat16.\n\n\r\n\x05\x04\x02\x02\
    \x06\x05\x12\x04\xa2\x02\x02\x06\n\r\n\x05\x04\x02\x02\x06\x01\x12\x04\
    \xa2\x02\x07\x1f\n\r\n\x05\x04\x02\x02\x06\x03\x12\x04\xa2\x02\"#\n~\n\
    \x04\x04\x02\x02\x07\x12\x04\xa6\x02\x02\x1a\x1ap\x20If\x20>\x200,\x20re\
    cord\x20a\x20timeline\x20every\x20this\x20many\x20steps.\n\x20EXPERIMENT\
    AL:\x20This\x20currently\x20has\x20no\x20effect\x20in\x20MasterSession.\
    \n\n\r\n\x05\x04\x02\x02\x07\x05\x12\x04\xa6\x02\x02\x07\n\r\n\x05\x04\
    \x02\x02\x07\x01\x12\x04\xa6\x02\x08\x15\n\r\n\x05\x04\x02\x02\x07\x03\
    \x12\x04\xa6\x02\x18\x19\n\xd8\x01\n\x04\x04\x02\x02\x08\x12\x04\xab\x02\
    \x02&\x1a\xc9\x01\x20Options\x20that\x20control\x20the\x20type\x20and\
    \x20amount\x20of\x20graph\x20rewriting.\n\x20Not\x20currently\x20configu\
    rable\x20via\x20the\x20public\x20Python\x20API\x20(i.e.\x20there\x20is\
    \x20no\x20API\n\x20stability\x20guarantee\x20if\x20you\x20import\x20Rewr\
    iterConfig\x20explicitly).\n\n\r\n\x05\x04\x02\x02\x08\x06\x12\x04\xab\
    \x02\x02\x10\n\r\n\x05\x04\x02\x02\x08\x01\x12\x04\xab\x02\x11\x20\n\r\n\
    \x05\x04\x02\x02\x08\x03\x12\x04\xab\x02#%\n\x0c\n\x02\x04\x03\x12\x06\
    \xae\x02\0\xc5\x02\x01\n\x0b\n\x03\x04\x03\x01\x12\x04\xae\x02\x08\x1d\n\
    \xbb\x01\n\x04\x04\x03\x02\0\x12\x04\xb3\x02\x02\x18\x1a\xac\x01\x20The\
    \x20number\x20of\x20threads\x20in\x20the\x20pool.\n\n\x200\x20means\x20t\
    he\x20system\x20picks\x20a\x20value\x20based\x20on\x20where\x20this\x20o\
    ption\x20proto\x20is\x20used\n\x20(see\x20the\x20declaration\x20of\x20th\
    e\x20specific\x20field\x20for\x20more\x20info).\n\n\r\n\x05\x04\x03\x02\
    \0\x05\x12\x04\xb3\x02\x02\x07\n\r\n\x05\x04\x03\x02\0\x01\x12\x04\xb3\
    \x02\x08\x13\n\r\n\x05\x04\x03\x02\0\x03\x12\x04\xb3\x02\x16\x17\n\xf7\
    \x05\n\x04\x04\x03\x02\x01\x12\x04\xc4\x02\x02\x19\x1a\xe8\x05\x20The\
    \x20global\x20name\x20of\x20the\x20threadpool.\n\n\x20If\x20empty,\x20th\
    en\x20the\x20threadpool\x20is\x20made\x20and\x20used\x20according\x20to\
    \x20the\x20scope\x20it's\n\x20in\x20-\x20e.g.,\x20for\x20a\x20session\
    \x20threadpool,\x20it\x20is\x20used\x20by\x20that\x20session\x20only.\n\
    \n\x20If\x20non-empty,\x20then:\n\x20-\x20a\x20global\x20threadpool\x20a\
    ssociated\x20with\x20this\x20name\x20is\x20looked\n\x20\x20\x20up\x20or\
    \x20created.\x20This\x20allows,\x20for\x20example,\x20sharing\x20one\x20\
    threadpool\x20across\n\x20\x20\x20many\x20sessions\x20(e.g.,\x20like\x20\
    the\x20default\x20behavior,\x20if\n\x20\x20\x20inter_op_parallelism_thre\
    ads\x20is\x20not\x20configured),\x20but\x20still\x20partitioning\n\x20\
    \x20\x20into\x20a\x20large\x20and\x20small\x20pool.\n\x20-\x20if\x20the\
    \x20threadpool\x20for\x20this\x20global_name\x20already\x20exists,\x20th\
    en\x20it\x20is\x20an\n\x20\x20\x20error\x20if\x20the\x20existing\x20pool\
    \x20was\x20created\x20using\x20a\x20different\x20num_threads\n\x20\x20\
    \x20value\x20as\x20is\x20specified\x20on\x20this\x20call.\n\x20-\x20thre\
    adpools\x20created\x20this\x20way\x20are\x20never\x20garbage\x20collecte\
    d.\n\n\r\n\x05\x04\x03\x02\x01\x05\x12\x04\xc4\x02\x02\x08\n\r\n\x05\x04\
    \x03\x02\x01\x01\x12\x04\xc4\x02\t\x14\n\r\n\x05\x04\x03\x02\x01\x03\x12\
    \x04\xc4\x02\x17\x18\n\x0c\n\x02\x04\x04\x12\x06\xc7\x02\0\xe0\x02\x01\n\
    \x0b\n\x03\x04\x04\x01\x12\x04\xc7\x02\x08\x12\n\x88\x02\n\x04\x04\x04\
    \x02\0\x12\x04\xcd\x02\x02(\x1a\xf9\x01\x20If\x20true,\x20always\x20use\
    \x20RPC\x20to\x20contact\x20the\x20session\x20target.\n\n\x20If\x20false\
    \x20(the\x20default\x20option),\x20TensorFlow\x20may\x20use\x20an\x20opt\
    imized\n\x20transport\x20for\x20client-master\x20communication\x20that\
    \x20avoids\x20the\x20RPC\n\x20stack.\x20This\x20option\x20is\x20primaril\
    y\x20for\x20used\x20testing\x20the\x20RPC\x20stack.\n\n\r\n\x05\x04\x04\
    \x02\0\x05\x12\x04\xcd\x02\x02\x06\n\r\n\x05\x04\x04\x02\0\x01\x12\x04\
    \xcd\x02\x07#\n\r\n\x05\x04\x04\x02\0\x03\x12\x04\xcd\x02&'\nO\n\x04\x04\
    \x04\x02\x01\x12\x04\xd0\x02\x02#\x1aA\x20The\x20compression\x20algorith\
    m\x20to\x20be\x20used.\x20One\x20of\x20\"deflate\",\x20\"gzip\".\n\n\r\n\
    \x05\x04\x04\x02\x01\x05\x12\x04\xd0\x02\x02\x08\n\r\n\x05\x04\x04\x02\
    \x01\x01\x12\x04\xd0\x02\t\x1e\n\r\n\x05\x04\x04\x02\x01\x03\x12\x04\xd0\
    \x02!\"\nu\n\x04\x04\x04\x02\x02\x12\x04\xd4\x02\x02\x1e\x1ag\x20If\x20c\
    ompression_algorithm\x20is\x20set,\x20the\x20compression\x20level\x20to\
    \x20be\x20used.\n\x20From\x200\x20(no\x20compression),\x20up\x20to\x203.\
    \n\n\r\n\x05\x04\x04\x02\x02\x05\x12\x04\xd4\x02\x02\x07\n\r\n\x05\x04\
    \x04\x02\x02\x01\x12\x04\xd4\x02\x08\x19\n\r\n\x05\x04\x04\x02\x02\x03\
    \x12\x04\xd4\x02\x1c\x1d\n\xc3\x03\n\x04\x04\x04\x02\x03\x12\x04\xdc\x02\
    \x02\x1e\x1a\xb4\x03\x20Setting\x20cache_rpc_response\x20to\x20true\x20w\
    ill\x20enable\x20sender\x20side\x20caching\x20of\n\x20response\x20for\
    \x20RecvTensorAsync\x20and\x20RecvBufAsync\x20to\x20allow\x20receiver\
    \x20to\x20retry\n\x20requests\x20.\x20This\x20is\x20only\x20necessary\
    \x20when\x20the\x20network\x20fabric\x20is\x20experiencing\x20a\n\x20sig\
    nificant\x20error\x20rate.\x20\x20Without\x20it\x20we'll\x20fail\x20a\
    \x20step\x20on\x20an\x20network\x20error,\n\x20while\x20with\x20it\x20we\
    'll\x20be\x20able\x20to\x20complete\x20long\x20steps\x20(like\x20complex\
    \n\x20initializations)\x20in\x20the\x20face\x20of\x20some\x20network\x20\
    errors\x20during\x20RecvTensor.\n\n\r\n\x05\x04\x04\x02\x03\x05\x12\x04\
    \xdc\x02\x02\x06\n\r\n\x05\x04\x04\x02\x03\x01\x12\x04\xdc\x02\x07\x19\n\
    \r\n\x05\x04\x04\x02\x03\x03\x12\x04\xdc\x02\x1c\x1d\nO\n\x04\x04\x04\
    \x02\x04\x12\x04\xdf\x02\x02.\x1aA\x20Disables\x20TCP\x20connection\x20s\
    haring\x20when\x20opening\x20a\x20new\x20RPC\x20channel.\n\n\r\n\x05\x04\
    \x04\x02\x04\x05\x12\x04\xdf\x02\x02\x06\n\r\n\x05\x04\x04\x02\x04\x01\
    \x12\x04\xdf\x02\x07)\n\r\n\x05\x04\x04\x02\x04\x03\x12\x04\xdf\x02,-\n\
    \xad\x02\n\x02\x04\x05\x12\x06\xea\x02\0\xef\x02\x01\x1a\x9e\x02\x20Meta\
    data\x20about\x20the\x20session.\n\n\x20This\x20can\x20be\x20used\x20by\
    \x20the\x20runtime\x20and\x20the\x20Ops\x20for\x20debugging,\x20monitori\
    ng,\x20etc.\n\n\x20The\x20(name,\x20version)\x20tuple\x20is\x20expected\
    \x20to\x20be\x20a\x20unique\x20identifier\x20for\n\x20sessions\x20within\
    \x20the\x20same\x20process.\n\n\x20NOTE:\x20This\x20is\x20currently\x20u\
    sed\x20and\x20propagated\x20only\x20by\x20the\x20direct\x20session.\n\n\
    \x0b\n\x03\x04\x05\x01\x12\x04\xea\x02\x08\x17\n\x0c\n\x04\x04\x05\x02\0\
    \x12\x04\xeb\x02\x02\x12\n\r\n\x05\x04\x05\x02\0\x05\x12\x04\xeb\x02\x02\
    \x08\n\r\n\x05\x04\x05\x02\0\x01\x12\x04\xeb\x02\t\r\n\r\n\x05\x04\x05\
    \x02\0\x03\x12\x04\xeb\x02\x10\x11\nB\n\x04\x04\x05\x02\x01\x12\x04\xee\
    \x02\x02\x14\x1a4\x20The\x20version\x20is\x20optional.\x20If\x20set,\x20\
    needs\x20to\x20be\x20>=\x200.\n\n\r\n\x05\x04\x05\x02\x01\x05\x12\x04\
    \xee\x02\x02\x07\n\r\n\x05\x04\x05\x02\x01\x01\x12\x04\xee\x02\x08\x0f\n\
    \r\n\x05\x04\x05\x02\x01\x03\x12\x04\xee\x02\x12\x13\ns\n\x02\x04\x06\
    \x12\x06\xf3\x02\0\xe0\x04\x01\x1ae\x20Session\x20configuration\x20param\
    eters.\n\x20The\x20system\x20picks\x20appropriate\x20values\x20for\x20fi\
    elds\x20that\x20are\x20not\x20set.\n\n\x0b\n\x03\x04\x06\x01\x12\x04\xf3\
    \x02\x08\x13\n\xd5\x01\n\x04\x04\x06\x02\0\x12\x04\xf8\x02\x02&\x1a\xc6\
    \x01\x20Map\x20from\x20device\x20type\x20name\x20(e.g.,\x20\"CPU\"\x20or\
    \x20\"GPU\"\x20)\x20to\x20maximum\n\x20number\x20of\x20devices\x20of\x20\
    that\x20type\x20to\x20use.\x20\x20If\x20a\x20particular\x20device\n\x20t\
    ype\x20is\x20not\x20found\x20in\x20the\x20map,\x20the\x20system\x20picks\
    \x20an\x20appropriate\n\x20number.\n\n\r\n\x05\x04\x06\x02\0\x06\x12\x04\
    \xf8\x02\x02\x14\n\r\n\x05\x04\x06\x02\0\x01\x12\x04\xf8\x02\x15!\n\r\n\
    \x05\x04\x06\x02\0\x03\x12\x04\xf8\x02$%\n\xee\x05\n\x04\x04\x06\x02\x01\
    \x12\x04\x88\x03\x02)\x1a\xdf\x05\x20The\x20execution\x20of\x20an\x20ind\
    ividual\x20op\x20(for\x20some\x20op\x20types)\x20can\x20be\n\x20parallel\
    ized\x20on\x20a\x20pool\x20of\x20intra_op_parallelism_threads.\n\x200\
    \x20means\x20the\x20system\x20picks\x20an\x20appropriate\x20number.\n\n\
    \x20If\x20you\x20create\x20an\x20ordinary\x20session,\x20e.g.,\x20from\
    \x20Python\x20or\x20C++,\n\x20then\x20there\x20is\x20exactly\x20one\x20i\
    ntra\x20op\x20thread\x20pool\x20per\x20process.\n\x20The\x20first\x20ses\
    sion\x20created\x20determines\x20the\x20number\x20of\x20threads\x20in\
    \x20this\x20pool.\n\x20All\x20subsequent\x20sessions\x20reuse/share\x20t\
    his\x20one\x20global\x20pool.\n\n\x20There\x20are\x20notable\x20exceptio\
    ns\x20to\x20the\x20default\x20behavior\x20describe\x20above:\n\x201.\x20\
    There\x20is\x20an\x20environment\x20variable\x20\x20for\x20overriding\
    \x20this\x20thread\x20pool,\n\x20\x20\x20\x20named\x20TF_OVERRIDE_GLOBAL\
    _THREADPOOL.\n\x202.\x20When\x20connecting\x20to\x20a\x20server,\x20such\
    \x20as\x20a\x20remote\x20`tf.train.Server`\n\x20\x20\x20\x20instance,\
    \x20then\x20this\x20option\x20will\x20be\x20ignored\x20altogether.\n\n\r\
    \n\x05\x04\x06\x02\x01\x05\x12\x04\x88\x03\x02\x07\n\r\n\x05\x04\x06\x02\
    \x01\x01\x12\x04\x88\x03\x08$\n\r\n\x05\x04\x06\x02\x01\x03\x12\x04\x88\
    \x03'(\n\xbd\x03\n\x04\x04\x06\x02\x02\x12\x04\x93\x03\x02)\x1a\xae\x03\
    \x20Nodes\x20that\x20perform\x20blocking\x20operations\x20are\x20enqueue\
    d\x20on\x20a\x20pool\x20of\n\x20inter_op_parallelism_threads\x20availabl\
    e\x20in\x20each\x20process.\n\n\x200\x20means\x20the\x20system\x20picks\
    \x20an\x20appropriate\x20number.\n\x20Negative\x20means\x20all\x20operat\
    ions\x20are\x20performed\x20in\x20caller's\x20thread.\n\n\x20Note\x20tha\
    t\x20the\x20first\x20Session\x20created\x20in\x20the\x20process\x20sets\
    \x20the\n\x20number\x20of\x20threads\x20for\x20all\x20future\x20sessions\
    \x20unless\x20use_per_session_threads\x20is\n\x20true\x20or\x20session_i\
    nter_op_thread_pool\x20is\x20configured.\n\n\r\n\x05\x04\x06\x02\x02\x05\
    \x12\x04\x93\x03\x02\x07\n\r\n\x05\x04\x06\x02\x02\x01\x12\x04\x93\x03\
    \x08$\n\r\n\x05\x04\x06\x02\x02\x03\x12\x04\x93\x03'(\n\xd0\x03\n\x04\
    \x04\x06\x02\x03\x12\x04\x9e\x03\x02#\x1a\xc1\x03\x20If\x20true,\x20use\
    \x20a\x20new\x20set\x20of\x20threads\x20for\x20this\x20session\x20rather\
    \x20than\x20the\x20global\n\x20pool\x20of\x20threads.\x20Only\x20support\
    ed\x20by\x20direct\x20sessions.\n\n\x20If\x20false,\x20use\x20the\x20glo\
    bal\x20threads\x20created\x20by\x20the\x20first\x20session,\x20or\x20the\
    \n\x20per-session\x20thread\x20pools\x20configured\x20by\x20session_inte\
    r_op_thread_pool.\n\n\x20This\x20option\x20is\x20deprecated.\x20The\x20s\
    ame\x20effect\x20can\x20be\x20achieved\x20by\x20setting\n\x20session_int\
    er_op_thread_pool\x20to\x20have\x20one\x20element,\x20whose\x20num_threa\
    ds\x20equals\n\x20inter_op_parallelism_threads.\n\n\r\n\x05\x04\x06\x02\
    \x03\x05\x12\x04\x9e\x03\x02\x06\n\r\n\x05\x04\x06\x02\x03\x01\x12\x04\
    \x9e\x03\x07\x1e\n\r\n\x05\x04\x06\x02\x03\x03\x12\x04\x9e\x03!\"\n\x91\
    \x08\n\x04\x04\x06\x02\x04\x12\x04\xb3\x03\x02C\x1a\x82\x08\x20This\x20o\
    ption\x20is\x20experimental\x20-\x20it\x20may\x20be\x20replaced\x20with\
    \x20a\x20different\x20mechanism\n\x20in\x20the\x20future.\n\n\x20Configu\
    res\x20session\x20thread\x20pools.\x20If\x20this\x20is\x20configured,\
    \x20then\x20RunOptions\x20for\n\x20a\x20Run\x20call\x20can\x20select\x20\
    the\x20thread\x20pool\x20to\x20use.\n\n\x20The\x20intended\x20use\x20is\
    \x20for\x20when\x20some\x20session\x20invocations\x20need\x20to\x20run\
    \x20in\x20a\n\x20background\x20pool\x20limited\x20to\x20a\x20small\x20nu\
    mber\x20of\x20threads:\n\x20-\x20For\x20example,\x20a\x20session\x20may\
    \x20be\x20configured\x20to\x20have\x20one\x20large\x20pool\x20(for\n\x20\
    regular\x20compute)\x20and\x20one\x20small\x20pool\x20(for\x20periodic,\
    \x20low\x20priority\x20work);\n\x20using\x20the\x20small\x20pool\x20is\
    \x20currently\x20the\x20mechanism\x20for\x20limiting\x20the\x20inter-op\
    \n\x20parallelism\x20of\x20the\x20low\x20priority\x20work.\x20\x20Note\
    \x20that\x20it\x20does\x20not\x20limit\x20the\n\x20parallelism\x20of\x20\
    work\x20spawned\x20by\x20a\x20single\x20op\x20kernel\x20implementation.\
    \n\x20-\x20Using\x20this\x20setting\x20is\x20normally\x20not\x20needed\
    \x20in\x20training,\x20but\x20may\x20help\x20some\n\x20serving\x20use\
    \x20cases.\n\x20-\x20It\x20is\x20also\x20generally\x20recommended\x20to\
    \x20set\x20the\x20global_name\x20field\x20of\x20this\n\x20proto,\x20to\
    \x20avoid\x20creating\x20multiple\x20large\x20pools.\x20It\x20is\x20typi\
    cally\x20better\x20to\n\x20run\x20the\x20non-low-priority\x20work,\x20ev\
    en\x20across\x20sessions,\x20in\x20a\x20single\x20large\n\x20pool.\n\n\r\
    \n\x05\x04\x06\x02\x04\x04\x12\x04\xb3\x03\x02\n\n\r\n\x05\x04\x06\x02\
    \x04\x06\x12\x04\xb3\x03\x0b\x20\n\r\n\x05\x04\x06\x02\x04\x01\x12\x04\
    \xb3\x03!=\n\r\n\x05\x04\x06\x02\x04\x03\x12\x04\xb3\x03@B\n\xbd\x01\n\
    \x04\x04\x06\x02\x05\x12\x04\xb8\x03\x02\x1d\x1a\xae\x01\x20Assignment\
    \x20of\x20Nodes\x20to\x20Devices\x20is\x20recomputed\x20every\x20placeme\
    nt_period\n\x20steps\x20until\x20the\x20system\x20warms\x20up\x20(at\x20\
    which\x20point\x20the\x20recomputation\n\x20typically\x20slows\x20down\
    \x20automatically).\n\n\r\n\x05\x04\x06\x02\x05\x05\x12\x04\xb8\x03\x02\
    \x07\n\r\n\x05\x04\x06\x02\x05\x01\x12\x04\xb8\x03\x08\x18\n\r\n\x05\x04\
    \x06\x02\x05\x03\x12\x04\xb8\x03\x1b\x1c\n\xc5\x01\n\x04\x04\x06\x02\x06\
    \x12\x04\xbd\x03\x02%\x1a\xb6\x01\x20When\x20any\x20filters\x20are\x20pr\
    esent\x20sessions\x20will\x20ignore\x20all\x20devices\x20which\x20do\x20\
    not\n\x20match\x20the\x20filters.\x20Each\x20filter\x20can\x20be\x20part\
    ially\x20specified,\x20e.g.\x20\"/job:ps\"\n\x20\"/job:worker/replica:3\
    \",\x20etc.\n\n\r\n\x05\x04\x06\x02\x06\x04\x12\x04\xbd\x03\x02\n\n\r\n\
    \x05\x04\x06\x02\x06\x05\x12\x04\xbd\x03\x0b\x11\n\r\n\x05\x04\x06\x02\
    \x06\x01\x12\x04\xbd\x03\x12\x20\n\r\n\x05\x04\x06\x02\x06\x03\x12\x04\
    \xbd\x03#$\n/\n\x04\x04\x06\x02\x07\x12\x04\xc0\x03\x02\x1d\x1a!\x20Opti\
    ons\x20that\x20apply\x20to\x20all\x20GPUs.\n\n\r\n\x05\x04\x06\x02\x07\
    \x06\x12\x04\xc0\x03\x02\x0c\n\r\n\x05\x04\x06\x02\x07\x01\x12\x04\xc0\
    \x03\r\x18\n\r\n\x05\x04\x06\x02\x07\x03\x12\x04\xc0\x03\x1b\x1c\n\x9a\
    \x02\n\x04\x04\x06\x02\x08\x12\x04\xc9\x03\x02\x20\x1a\x8b\x02\x20Whethe\
    r\x20soft\x20placement\x20is\x20allowed.\x20If\x20allow_soft_placement\
    \x20is\x20true,\n\x20an\x20op\x20will\x20be\x20placed\x20on\x20CPU\x20if\
    \n\x20\x20\x201.\x20there's\x20no\x20GPU\x20implementation\x20for\x20the\
    \x20OP\n\x20or\n\x20\x20\x202.\x20no\x20GPU\x20devices\x20are\x20known\
    \x20or\x20registered\n\x20or\n\x20\x20\x203.\x20need\x20to\x20co-locate\
    \x20with\x20reftype\x20input(s)\x20which\x20are\x20from\x20CPU.\n\n\r\n\
    \x05\x04\x06\x02\x08\x05\x12\x04\xc9\x03\x02\x06\n\r\n\x05\x04\x06\x02\
    \x08\x01\x12\x04\xc9\x03\x07\x1b\n\r\n\x05\x04\x06\x02\x08\x03\x12\x04\
    \xc9\x03\x1e\x1f\n;\n\x04\x04\x06\x02\t\x12\x04\xcc\x03\x02\x20\x1a-\x20\
    Whether\x20device\x20placements\x20should\x20be\x20logged.\n\n\r\n\x05\
    \x04\x06\x02\t\x05\x12\x04\xcc\x03\x02\x06\n\r\n\x05\x04\x06\x02\t\x01\
    \x12\x04\xcc\x03\x07\x1b\n\r\n\x05\x04\x06\x02\t\x03\x12\x04\xcc\x03\x1e\
    \x1f\n1\n\x04\x04\x06\x02\n\x12\x04\xcf\x03\x02\"\x1a#\x20Options\x20tha\
    t\x20apply\x20to\x20all\x20graphs.\n\n\r\n\x05\x04\x06\x02\n\x06\x12\x04\
    \xcf\x03\x02\x0e\n\r\n\x05\x04\x06\x02\n\x01\x12\x04\xcf\x03\x0f\x1c\n\r\
    \n\x05\x04\x06\x02\n\x03\x12\x04\xcf\x03\x1f!\n\xce\x01\n\x04\x04\x06\
    \x02\x0b\x12\x04\xd4\x03\x02%\x1a\xbf\x01\x20Global\x20timeout\x20for\
    \x20all\x20blocking\x20operations\x20in\x20this\x20session.\x20\x20If\
    \x20non-zero,\n\x20and\x20not\x20overridden\x20on\x20a\x20per-operation\
    \x20basis,\x20this\x20value\x20will\x20be\x20used\x20as\x20the\n\x20dead\
    line\x20for\x20all\x20blocking\x20operations.\n\n\r\n\x05\x04\x06\x02\
    \x0b\x05\x12\x04\xd4\x03\x02\x07\n\r\n\x05\x04\x06\x02\x0b\x01\x12\x04\
    \xd4\x03\x08\x1f\n\r\n\x05\x04\x06\x02\x0b\x03\x12\x04\xd4\x03\"$\nR\n\
    \x04\x04\x06\x02\x0c\x12\x04\xd7\x03\x02\x1e\x1aD\x20Options\x20that\x20\
    apply\x20when\x20this\x20session\x20uses\x20the\x20distributed\x20runtim\
    e.\n\n\r\n\x05\x04\x06\x02\x0c\x06\x12\x04\xd7\x03\x02\x0c\n\r\n\x05\x04\
    \x06\x02\x0c\x01\x12\x04\xd7\x03\r\x18\n\r\n\x05\x04\x06\x02\x0c\x03\x12\
    \x04\xd7\x03\x1b\x1d\nD\n\x04\x04\x06\x02\r\x12\x04\xda\x03\x02\x1e\x1a6\
    \x20Optional\x20list\x20of\x20all\x20workers\x20to\x20use\x20in\x20this\
    \x20session.\n\n\r\n\x05\x04\x06\x02\r\x06\x12\x04\xda\x03\x02\x0c\n\r\n\
    \x05\x04\x06\x02\r\x01\x12\x04\xda\x03\r\x18\n\r\n\x05\x04\x06\x02\r\x03\
    \x12\x04\xda\x03\x1b\x1d\n\xe1\x01\n\x04\x04\x06\x02\x0e\x12\x04\xdf\x03\
    \x02\"\x1a\xd2\x01\x20If\x20true,\x20any\x20resources\x20such\x20as\x20V\
    ariables\x20used\x20in\x20the\x20session\x20will\x20not\x20be\n\x20share\
    d\x20with\x20other\x20sessions.\x20However,\x20when\x20clusterspec\x20pr\
    opagation\x20is\n\x20enabled,\x20this\x20field\x20is\x20ignored\x20and\
    \x20sessions\x20are\x20always\x20isolated.\n\n\r\n\x05\x04\x06\x02\x0e\
    \x05\x12\x04\xdf\x03\x02\x06\n\r\n\x05\x04\x06\x02\x0e\x01\x12\x04\xdf\
    \x03\x07\x1c\n\r\n\x05\x04\x06\x02\x0e\x03\x12\x04\xdf\x03\x1f!\n\xca\
    \x01\n\x04\x04\x06\x02\x0f\x12\x04\xe5\x03\x02-\x1a\xbb\x01\x20When\x20t\
    rue,\x20WorkerSessions\x20are\x20created\x20with\x20device\x20attributes\
    \x20from\x20the\n\x20full\x20cluster.\n\x20This\x20is\x20helpful\x20when\
    \x20a\x20worker\x20wants\x20to\x20partition\x20a\x20graph\n\x20(for\x20e\
    xample\x20during\x20a\x20PartitionedCallOp).\n\n\r\n\x05\x04\x06\x02\x0f\
    \x05\x12\x04\xe5\x03\x02\x06\n\r\n\x05\x04\x06\x02\x0f\x01\x12\x04\xe5\
    \x03\x07'\n\r\n\x05\x04\x06\x02\x0f\x03\x12\x04\xe5\x03*,\n\xab\x01\n\
    \x04\x04\x06\x03\x01\x12\x06\xea\x03\x02\xdb\x04\x03\x1a\x9a\x01\x20Ever\
    ything\x20inside\x20Experimental\x20is\x20subject\x20to\x20change\x20and\
    \x20is\x20not\x20subject\n\x20to\x20API\x20stability\x20guarantees\x20in\
    \n\x20https://www.tensorflow.org/guide/version_compat.\n\n\r\n\x05\x04\
    \x06\x03\x01\x01\x12\x04\xea\x03\n\x16\n1\n\x06\x04\x06\x03\x01\x02\0\
    \x12\x04\xec\x03\x04'\x1a!\x20Task\x20name\x20for\x20group\x20resolution\
    .\n\n\x0f\n\x07\x04\x06\x03\x01\x02\0\x05\x12\x04\xec\x03\x04\n\n\x0f\n\
    \x07\x04\x06\x03\x01\x02\0\x01\x12\x04\xec\x03\x0b\"\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\0\x03\x12\x04\xec\x03%&\n\xd6\x01\n\x05\x04\x06\x03\x01\t\
    \x12\x04\xf2\x03\x04\x0f\x1a\xc6\x01\x20We\x20removed\x20the\x20flag\x20\
    client_handles_error_formatting.\x20Marking\x20the\x20tag\n\x20number\
    \x20as\x20reserved.\n\x20TODO(shikharagarwal):\x20Should\x20we\x20just\
    \x20remove\x20this\x20tag\x20so\x20that\x20it\x20can\x20be\n\x20used\x20\
    in\x20future\x20for\x20other\x20purpose?\n\n\x0e\n\x06\x04\x06\x03\x01\t\
    \0\x12\x04\xf2\x03\r\x0e\n\x0f\n\x07\x04\x06\x03\x01\t\0\x01\x12\x04\xf2\
    \x03\r\x0e\n\x0f\n\x07\x04\x06\x03\x01\t\0\x02\x12\x04\xf2\x03\r\x0e\nq\
    \n\x06\x04\x06\x03\x01\x02\x01\x12\x04\xf6\x03\x04\x1d\x1aa\x20Which\x20\
    executor\x20to\x20use,\x20the\x20default\x20executor\x20will\x20be\x20us\
    ed\n\x20if\x20it\x20is\x20an\x20empty\x20string\x20or\x20\"DEFAULT\"\n\n\
    \x0f\n\x07\x04\x06\x03\x01\x02\x01\x05\x12\x04\xf6\x03\x04\n\n\x0f\n\x07\
    \x04\x06\x03\x01\x02\x01\x01\x12\x04\xf6\x03\x0b\x18\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x01\x03\x12\x04\xf6\x03\x1b\x1c\n\xcc\x01\n\x06\x04\x06\x03\
    \x01\x02\x02\x12\x04\xfb\x03\x04!\x1a\xbb\x01\x20Guidance\x20to\x20forma\
    tting\x20of\x20large\x20RecvBuf\x20fields\x20for\x20transfer.\n\x20Any\
    \x20positive\x20value\x20sets\x20the\x20max\x20chunk\x20size.\x20\x200\
    \x20defaults\x20to\x204096.\n\x20Any\x20negative\x20value\x20indicates\
    \x20no\x20max,\x20i.e.\x20one\x20chunk\x20only.\n\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x02\x05\x12\x04\xfb\x03\x04\t\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\x02\x01\x12\x04\xfb\x03\n\x1c\n\x0f\n\x07\x04\x06\x03\x01\x02\x02\
    \x03\x12\x04\xfb\x03\x1f\x20\n\xdd\x01\n\x06\x04\x06\x03\x01\x02\x03\x12\
    \x04\x80\x04\x04\x1f\x1a\xcc\x01\x20If\x20true,\x20and\x20supported\x20b\
    y\x20the\x20platform,\x20the\x20runtime\x20will\x20attempt\x20to\n\x20us\
    e\x20NUMA\x20affinity\x20where\x20applicable.\x20\x20One\x20consequence\
    \x20will\x20be\x20the\n\x20existence\x20of\x20as\x20many\x20CPU\x20devic\
    es\x20as\x20there\x20are\x20available\x20NUMA\x20nodes.\n\n\x0f\n\x07\
    \x04\x06\x03\x01\x02\x03\x05\x12\x04\x80\x04\x04\x08\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x03\x01\x12\x04\x80\x04\t\x1a\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\x03\x03\x12\x04\x80\x04\x1d\x1e\n\x8c\x01\n\x06\x04\x06\x03\x01\x02\
    \x04\x12\x04\x84\x04\x04;\x1a|\x20If\x20true,\x20make\x20collective\x20o\
    p\x20execution\x20order\x20sequential\x20and\x20deterministic\n\x20for\
    \x20potentially\x20concurrent\x20collective\x20instances.\n\n\x0f\n\x07\
    \x04\x06\x03\x01\x02\x04\x05\x12\x04\x84\x04\x04\x08\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x04\x01\x12\x04\x84\x04\t6\n\x0f\n\x07\x04\x06\x03\x01\x02\
    \x04\x03\x12\x04\x84\x049:\n]\n\x06\x04\x06\x03\x01\x02\x05\x12\x04\x88\
    \x04\x04\x1d\x1aM\x20If\x20true,\x20use\x20NCCL\x20for\x20CollectiveOps.\
    \x20\x20This\x20feature\x20is\x20highly\n\x20experimental.\n\n\x0f\n\x07\
    \x04\x06\x03\x01\x02\x05\x05\x12\x04\x88\x04\x04\x08\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x05\x01\x12\x04\x88\x04\t\x18\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\x05\x03\x12\x04\x88\x04\x1b\x1c\n\xda\x07\n\x06\x04\x06\x03\x01\x02\
    \x06\x12\x04\x9e\x04\x04<\x1a\xc9\x07\x20In\x20the\x20following,\x20sess\
    ion\x20state\x20means\x20the\x20value\x20of\x20a\x20variable,\x20element\
    s\n\x20in\x20a\x20hash\x20table,\x20or\x20any\x20other\x20resource,\x20a\
    ccessible\x20by\x20worker\x20sessions\n\x20held\x20by\x20a\x20TF\x20serv\
    er.\n\n\x20When\x20ClusterSpec\x20propagation\x20is\x20enabled,\x20the\
    \x20value\x20of\n\x20isolate_session_state\x20is\x20ignored\x20when\x20d\
    eciding\x20whether\x20to\x20share\x20session\n\x20states\x20in\x20a\x20T\
    F\x20server\x20(for\x20backwards\x20compatibility\x20reasons).\n\x20-\
    \x20If\x20share_session_state_in_clusterspec_propagation\x20is\x20true,\
    \x20the\x20session\n\x20states\x20are\x20shared.\n\x20-\x20If\x20share_s\
    ession_state_in_clusterspec_propagation\x20is\x20false,\x20session\n\x20\
    states\x20are\x20isolated.\n\n\x20When\x20clusterspec\x20propagation\x20\
    is\x20not\x20used,\x20the\x20value\x20of\n\x20share_session_state_in_clu\
    sterspec_propagation\x20is\x20ignored\x20when\x20deciding\n\x20whether\
    \x20to\x20share\x20session\x20states\x20in\x20a\x20TF\x20server.\n\x20-\
    \x20If\x20isolate_session_state\x20is\x20true,\x20session\x20states\x20a\
    re\x20isolated.\n\x20-\x20If\x20isolate_session_state\x20is\x20false,\
    \x20session\x20states\x20are\x20shared.\n\n\x20TODO(b/129330037):\x20Add\
    \x20a\x20single\x20API\x20that\x20consistently\x20treats\n\x20isolate_se\
    ssion_state\x20and\x20ClusterSpec\x20propagation.\n\n\x0f\n\x07\x04\x06\
    \x03\x01\x02\x06\x05\x12\x04\x9e\x04\x04\x08\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\x06\x01\x12\x04\x9e\x04\t7\n\x0f\n\x07\x04\x06\x03\x01\x02\x06\x03\
    \x12\x04\x9e\x04:;\n\xf2\x01\n\x06\x04\x06\x03\x01\x02\x07\x12\x04\xa4\
    \x04\x04%\x1a\xe1\x01\x20If\x20using\x20a\x20direct\x20session,\x20disab\
    le\x20spinning\x20while\x20waiting\x20for\x20work\x20in\n\x20the\x20thre\
    ad\x20pool.\x20This\x20may\x20result\x20in\x20higher\x20latency\x20for\
    \x20completing\x20ops,\n\x20but\x20in\x20the\x20case\x20where\x20there\
    \x20is\x20a\x20lot\x20of\x20spinning\x20may\x20result\x20in\x20lower\n\
    \x20CPU\x20usage.\n\n\x0f\n\x07\x04\x06\x03\x01\x02\x07\x05\x12\x04\xa4\
    \x04\x04\x08\n\x0f\n\x07\x04\x06\x03\x01\x02\x07\x01\x12\x04\xa4\x04\t\
    \x20\n\x0f\n\x07\x04\x06\x03\x01\x02\x07\x03\x12\x04\xa4\x04#$\n\x80\x01\
    \n\x06\x04\x06\x03\x01\x02\x08\x12\x04\xa8\x04\x04/\x1ap\x20This\x20was\
    \x20promoted\x20to\x20a\x20non-experimental\x20API.\x20Please\x20use\n\
    \x20ConfigProto.share_cluster_devices_in_session\x20instead.\n\n\x0f\n\
    \x07\x04\x06\x03\x01\x02\x08\x05\x12\x04\xa8\x04\x04\x08\n\x0f\n\x07\x04\
    \x06\x03\x01\x02\x08\x01\x12\x04\xa8\x04\t)\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\x08\x03\x12\x04\xa8\x04,.\n\xcf\x01\n\x06\x04\x06\x03\x01\x02\t\x12\
    \x04\xb0\x04\x04*\x1a\xbe\x01\x20Metadata\x20about\x20the\x20session.\n\
    \n\x20If\x20set,\x20this\x20can\x20be\x20used\x20by\x20the\x20runtime\
    \x20and\x20the\x20Ops\x20for\x20debugging,\n\x20monitoring,\x20etc.\n\n\
    \x20NOTE:\x20This\x20is\x20currently\x20used\x20and\x20propagated\x20onl\
    y\x20by\x20the\x20direct\x20session.\n\n\x0f\n\x07\x04\x06\x03\x01\x02\t\
    \x06\x12\x04\xb0\x04\x04\x13\n\x0f\n\x07\x04\x06\x03\x01\x02\t\x01\x12\
    \x04\xb0\x04\x14$\n\x0f\n\x07\x04\x06\x03\x01\x02\t\x03\x12\x04\xb0\x04'\
    )\n\x98\x02\n\x06\x04\x06\x03\x01\x02\n\x12\x04\xb8\x04\x04(\x1a\x87\x02\
    \x20If\x20true,\x20the\x20session\x20may\x20treat\x20the\x20graph\x20as\
    \x20being\x20static\x20for\x20optimization\n\x20purposes.\n\n\x20If\x20t\
    his\x20option\x20is\x20set\x20to\x20true\x20when\x20a\x20session\x20is\
    \x20created,\x20the\x20full\n\x20GraphDef\x20must\x20be\x20passed\x20in\
    \x20a\x20single\x20call\x20to\x20Session::Create(),\x20and\n\x20Session:\
    :Extend()\x20may\x20not\x20be\x20supported.\n\n\x0f\n\x07\x04\x06\x03\
    \x01\x02\n\x05\x12\x04\xb8\x04\x04\x08\n\x0f\n\x07\x04\x06\x03\x01\x02\n\
    \x01\x12\x04\xb8\x04\t\"\n\x0f\n\x07\x04\x06\x03\x01\x02\n\x03\x12\x04\
    \xb8\x04%'\n\x86\x05\n\x06\x04\x06\x03\x01\x02\x0b\x12\x04\xc5\x04\x04!\
    \x1a\xf5\x04\x20Whether\x20to\x20enable\x20the\x20MLIR-based\x20TF->XLA\
    \x20bridge.\n\n\x20This\x20is\x20a\x20replacement\x20to\x20the\x20existi\
    ng\x20bridge,\x20and\x20not\x20ready\x20for\n\x20production\x20usage\x20\
    yet.\n\x20If\x20this\x20option\x20is\x20set\x20to\x20true\x20when\x20a\
    \x20session\x20is\x20created,\x20MLIR\x20is\x20used\x20to\n\x20perform\
    \x20the\x20set\x20of\x20graph\x20transformations\x20to\x20put\x20the\x20\
    graph\x20in\x20a\x20form\x20that\n\x20can\x20be\x20executed\x20with\x20d\
    elegation\x20of\x20some\x20computations\x20to\x20an\x20accelerator.\n\
    \x20This\x20builds\x20on\x20the\x20model\x20of\x20XLA\x20where\x20a\x20s\
    ubset\x20of\x20the\x20graph\x20is\n\x20encapsulated\x20and\x20attached\
    \x20to\x20a\x20\"compile\"\x20operation,\x20whose\x20result\x20is\x20fed\
    \n\x20to\x20an\x20\"execute\"\x20operation.\x20The\x20kernel\x20for\x20t\
    hese\x20operations\x20is\x20responsible\n\x20to\x20lower\x20the\x20encap\
    sulated\x20graph\x20to\x20a\x20particular\x20device.\n\n\x0f\n\x07\x04\
    \x06\x03\x01\x02\x0b\x05\x12\x04\xc5\x04\x04\x08\n\x0f\n\x07\x04\x06\x03\
    \x01\x02\x0b\x01\x12\x04\xc5\x04\t\x1b\n\x0f\n\x07\x04\x06\x03\x01\x02\
    \x0b\x03\x12\x04\xc5\x04\x1e\x20\n\x98\x02\n\x06\x04\x06\x03\x01\x02\x0c\
    \x12\x04\xcc\x04\x04-\x1a\x87\x02\x20Whether\x20to\x20enable\x20the\x20M\
    LIR-based\x20Graph\x20optimizations.\n\n\x20This\x20will\x20become\x20a\
    \x20part\x20of\x20standard\x20Tensorflow\x20graph\x20optimization\n\x20p\
    ipeline,\x20currently\x20this\x20is\x20only\x20used\x20for\x20gradual\
    \x20migration\x20and\x20testing\n\x20new\x20passes\x20that\x20are\x20rep\
    lacing\x20existing\x20optimizations\x20in\x20Grappler.\n\n\x0f\n\x07\x04\
    \x06\x03\x01\x02\x0c\x05\x12\x04\xcc\x04\x04\x08\n\x0f\n\x07\x04\x06\x03\
    \x01\x02\x0c\x01\x12\x04\xcc\x04\t'\n\x0f\n\x07\x04\x06\x03\x01\x02\x0c\
    \x03\x12\x04\xcc\x04*,\n\xe8\x01\n\x06\x04\x06\x03\x01\x02\r\x12\x04\xd3\
    \x04\x04.\x1a\xd7\x01\x20If\x20true,\x20the\x20session\x20will\x20not\
    \x20store\x20an\x20additional\x20copy\x20of\x20the\x20graph\x20for\n\x20\
    each\x20subgraph.\n\n\x20If\x20this\x20option\x20is\x20set\x20to\x20true\
    \x20when\x20a\x20session\x20is\x20created,\x20the\n\x20`RunOptions.outpu\
    t_partition_graphs`\x20options\x20must\x20not\x20be\x20set.\n\n\x0f\n\
    \x07\x04\x06\x03\x01\x02\r\x05\x12\x04\xd3\x04\x04\x08\n\x0f\n\x07\x04\
    \x06\x03\x01\x02\r\x01\x12\x04\xd3\x04\t(\n\x0f\n\x07\x04\x06\x03\x01\
    \x02\r\x03\x12\x04\xd3\x04+-\n\x8d\x02\n\x06\x04\x06\x03\x01\x02\x0e\x12\
    \x04\xda\x04\x04+\x1a\xfc\x01\x20Minimum\x20number\x20of\x20batches\x20r\
    un\x20through\x20the\x20XLA\x20graph\x20before\x20XLA\x20fusion\n\x20aut\
    otuner\x20is\x20enabled.\x20Default\x20value\x20of\x20zero\x20disables\
    \x20the\x20autotuner.\n\n\x20The\x20XLA\x20fusion\x20autotuner\x20can\
    \x20improve\x20performance\x20by\x20executing\x20a\x20heuristic\n\x20sea\
    rch\x20on\x20the\x20compiler\x20parameters.\n\n\x0f\n\x07\x04\x06\x03\
    \x01\x02\x0e\x05\x12\x04\xda\x04\x04\t\n\x0f\n\x07\x04\x06\x03\x01\x02\
    \x0e\x01\x12\x04\xda\x04\n%\n\x0f\n\x07\x04\x06\x03\x01\x02\x0e\x03\x12\
    \x04\xda\x04(*\n\x0c\n\x04\x04\x06\x02\x10\x12\x04\xdd\x04\x02!\n\r\n\
    \x05\x04\x06\x02\x10\x06\x12\x04\xdd\x04\x02\x0e\n\r\n\x05\x04\x06\x02\
    \x10\x01\x12\x04\xdd\x04\x0f\x1b\n\r\n\x05\x04\x06\x02\x10\x03\x12\x04\
    \xdd\x04\x1e\x20\n0\n\x02\x04\x07\x12\x06\xe3\x04\0\xa1\x05\x01\x1a\"\
    \x20Options\x20for\x20a\x20single\x20Run()\x20call.\n\n\x0b\n\x03\x04\
    \x07\x01\x12\x04\xe3\x04\x08\x12\n\x84\x01\n\x04\x04\x07\x04\0\x12\x06\
    \xe6\x04\x02\xeb\x04\x03\x1at\x20TODO(pbar)\x20Turn\x20this\x20into\x20a\
    \x20TraceOptions\x20proto\x20which\x20allows\n\x20tracing\x20to\x20be\
    \x20controlled\x20in\x20a\x20more\x20orthogonal\x20manner?\n\n\r\n\x05\
    \x04\x07\x04\0\x01\x12\x04\xe6\x04\x07\x11\n\x0e\n\x06\x04\x07\x04\0\x02\
    \0\x12\x04\xe7\x04\x04\x11\n\x0f\n\x07\x04\x07\x04\0\x02\0\x01\x12\x04\
    \xe7\x04\x04\x0c\n\x0f\n\x07\x04\x07\x04\0\x02\0\x02\x12\x04\xe7\x04\x0f\
    \x10\n\x0e\n\x06\x04\x07\x04\0\x02\x01\x12\x04\xe8\x04\x04\x17\n\x0f\n\
    \x07\x04\x07\x04\0\x02\x01\x01\x12\x04\xe8\x04\x04\x12\n\x0f\n\x07\x04\
    \x07\x04\0\x02\x01\x02\x12\x04\xe8\x04\x15\x16\n\x0e\n\x06\x04\x07\x04\0\
    \x02\x02\x12\x04\xe9\x04\x04\x17\n\x0f\n\x07\x04\x07\x04\0\x02\x02\x01\
    \x12\x04\xe9\x04\x04\x12\n\x0f\n\x07\x04\x07\x04\0\x02\x02\x02\x12\x04\
    \xe9\x04\x15\x16\n\x0e\n\x06\x04\x07\x04\0\x02\x03\x12\x04\xea\x04\x04\
    \x13\n\x0f\n\x07\x04\x07\x04\0\x02\x03\x01\x12\x04\xea\x04\x04\x0e\n\x0f\
    \n\x07\x04\x07\x04\0\x02\x03\x02\x12\x04\xea\x04\x11\x12\n\x0c\n\x04\x04\
    \x07\x02\0\x12\x04\xec\x04\x02\x1d\n\r\n\x05\x04\x07\x02\0\x06\x12\x04\
    \xec\x04\x02\x0c\n\r\n\x05\x04\x07\x02\0\x01\x12\x04\xec\x04\r\x18\n\r\n\
    \x05\x04\x07\x02\0\x03\x12\x04\xec\x04\x1b\x1c\nG\n\x04\x04\x07\x02\x01\
    \x12\x04\xef\x04\x02\x1a\x1a9\x20Time\x20to\x20wait\x20for\x20operation\
    \x20to\x20complete\x20in\x20milliseconds.\n\n\r\n\x05\x04\x07\x02\x01\
    \x05\x12\x04\xef\x04\x02\x07\n\r\n\x05\x04\x07\x02\x01\x01\x12\x04\xef\
    \x04\x08\x15\n\r\n\x05\x04\x07\x02\x01\x03\x12\x04\xef\x04\x18\x19\n\x9d\
    \x03\n\x04\x04\x07\x02\x02\x12\x04\xf7\x04\x02!\x1a\x8e\x03\x20The\x20th\
    read\x20pool\x20to\x20use,\x20if\x20session_inter_op_thread_pool\x20is\
    \x20configured.\n\x20To\x20use\x20the\x20caller\x20thread\x20set\x20this\
    \x20to\x20-1\x20-\x20this\x20uses\x20the\x20caller\x20thread\n\x20to\x20\
    execute\x20Session::Run()\x20and\x20thus\x20avoids\x20a\x20context\x20sw\
    itch.\x20Using\x20the\n\x20caller\x20thread\x20to\x20execute\x20Session:\
    :Run()\x20should\x20be\x20done\x20ONLY\x20for\x20simple\n\x20graphs,\x20\
    where\x20the\x20overhead\x20of\x20an\x20additional\x20context\x20switch\
    \x20is\n\x20comparable\x20with\x20the\x20overhead\x20of\x20Session::Run(\
    ).\n\n\r\n\x05\x04\x07\x02\x02\x05\x12\x04\xf7\x04\x02\x07\n\r\n\x05\x04\
    \x07\x02\x02\x01\x12\x04\xf7\x04\x08\x1c\n\r\n\x05\x04\x07\x02\x02\x03\
    \x12\x04\xf7\x04\x1f\x20\np\n\x04\x04\x07\x02\x03\x12\x04\xfb\x04\x02#\
    \x1ab\x20Whether\x20the\x20partition\x20graph(s)\x20executed\x20by\x20th\
    e\x20executor(s)\x20should\x20be\n\x20outputted\x20via\x20RunMetadata.\n\
    \n\r\n\x05\x04\x07\x02\x03\x05\x12\x04\xfb\x04\x02\x06\n\r\n\x05\x04\x07\
    \x02\x03\x01\x12\x04\xfb\x04\x07\x1e\n\r\n\x05\x04\x07\x02\x03\x03\x12\
    \x04\xfb\x04!\"\nT\n\x04\x04\x07\x02\x04\x12\x04\xfe\x04\x02!\x1aF\x20EX\
    PERIMENTAL.\x20\x20Options\x20used\x20to\x20initialize\x20DebuggerState,\
    \x20if\x20enabled.\n\n\r\n\x05\x04\x07\x02\x04\x06\x12\x04\xfe\x04\x02\
    \x0e\n\r\n\x05\x04\x07\x02\x04\x01\x12\x04\xfe\x04\x0f\x1c\n\r\n\x05\x04\
    \x07\x02\x04\x03\x12\x04\xfe\x04\x1f\x20\n\xe7\x01\n\x04\x04\x07\x02\x05\
    \x12\x04\x85\x05\x02.\x1a\xd8\x01\x20When\x20enabled,\x20causes\x20tenso\
    r\x20allocation\x20information\x20to\x20be\x20included\x20in\n\x20the\
    \x20error\x20message\x20when\x20the\x20Run()\x20call\x20fails\x20because\
    \x20the\x20allocator\x20ran\n\x20out\x20of\x20memory\x20(OOM).\n\n\x20En\
    abling\x20this\x20option\x20can\x20slow\x20down\x20the\x20Run()\x20call.\
    \n\n\r\n\x05\x04\x07\x02\x05\x05\x12\x04\x85\x05\x02\x06\n\r\n\x05\x04\
    \x07\x02\x05\x01\x12\x04\x85\x05\x07)\n\r\n\x05\x04\x07\x02\x05\x03\x12\
    \x04\x85\x05,-\n\xab\x01\n\x04\x04\x07\x03\0\x12\x06\x8a\x05\x02\x9c\x05\
    \x03\x1a\x9a\x01\x20Everything\x20inside\x20Experimental\x20is\x20subjec\
    t\x20to\x20change\x20and\x20is\x20not\x20subject\n\x20to\x20API\x20stabi\
    lity\x20guarantees\x20in\n\x20https://www.tensorflow.org/guide/version_c\
    ompat.\n\n\r\n\x05\x04\x07\x03\0\x01\x12\x04\x8a\x05\n\x16\n\xec\x01\n\
    \x06\x04\x07\x03\0\x02\0\x12\x04\x8f\x05\x04#\x1a\xdb\x01\x20If\x20non-z\
    ero,\x20declares\x20that\x20this\x20graph\x20is\x20going\x20to\x20use\
    \x20collective\n\x20ops\x20and\x20must\x20synchronize\x20step_ids\x20wit\
    h\x20any\x20other\x20graph\x20with\x20this\n\x20same\x20group_key\x20val\
    ue\x20(in\x20a\x20distributed\x20computation\x20where\x20tasks\n\x20run\
    \x20disjoint\x20graphs).\n\n\x0f\n\x07\x04\x07\x03\0\x02\0\x05\x12\x04\
    \x8f\x05\x04\t\n\x0f\n\x07\x04\x07\x03\0\x02\0\x01\x12\x04\x8f\x05\n\x1e\
    \n\x0f\n\x07\x04\x07\x03\0\x02\0\x03\x12\x04\x8f\x05!\"\n\xf2\x01\n\x06\
    \x04\x07\x03\0\x02\x01\x12\x04\x94\x05\x04\"\x1a\xe1\x01\x20If\x20true,\
    \x20then\x20operations\x20(using\x20the\x20inter-op\x20pool)\x20across\
    \x20all\n\x20session::run()\x20calls\x20will\x20be\x20centrally\x20sched\
    uled,\x20optimizing\x20for\x20(median\n\x20and\x20tail)\x20latency.\n\
    \x20Consider\x20using\x20this\x20option\x20for\x20CPU-bound\x20workloads\
    \x20like\x20inference.\n\n\x0f\n\x07\x04\x07\x03\0\x02\x01\x05\x12\x04\
    \x94\x05\x04\x08\n\x0f\n\x07\x04\x07\x03\0\x02\x01\x01\x12\x04\x94\x05\t\
    \x1d\n\x0f\n\x07\x04\x07\x03\0\x02\x01\x03\x12\x04\x94\x05\x20!\n8\n\x06\
    \x04\x07\x03\0\x03\0\x12\x06\x96\x05\x04\x9a\x05\x05\x1a&\x20Options\x20\
    for\x20run\x20handler\x20thread\x20pool.\n\n\x0f\n\x07\x04\x07\x03\0\x03\
    \0\x01\x12\x04\x96\x05\x0c!\n\xa3\x01\n\x08\x04\x07\x03\0\x03\0\x02\0\
    \x12\x04\x99\x05\x06\x19\x1a\x90\x01\x20Priority\x20of\x20the\x20request\
    .\x20The\x20run\x20handler\x20thread\x20pool\x20will\x20schedule\x20ops\
    \n\x20based\x20on\x20the\x20priority\x20number.\x20The\x20larger\x20numb\
    er\x20means\x20higher\x20priority.\n\n\x11\n\t\x04\x07\x03\0\x03\0\x02\0\
    \x05\x12\x04\x99\x05\x06\x0b\n\x11\n\t\x04\x07\x03\0\x03\0\x02\0\x01\x12\
    \x04\x99\x05\x0c\x14\n\x11\n\t\x04\x07\x03\0\x03\0\x02\0\x03\x12\x04\x99\
    \x05\x17\x18\n\x0e\n\x06\x04\x07\x03\0\x02\x02\x12\x04\x9b\x05\x047\n\
    \x0f\n\x07\x04\x07\x03\0\x02\x02\x06\x12\x04\x9b\x05\x04\x19\n\x0f\n\x07\
    \x04\x07\x03\0\x02\x02\x01\x12\x04\x9b\x05\x1a2\n\x0f\n\x07\x04\x07\x03\
    \0\x02\x02\x03\x12\x04\x9b\x0556\n\x0c\n\x04\x04\x07\x02\x06\x12\x04\x9e\
    \x05\x02\x20\n\r\n\x05\x04\x07\x02\x06\x06\x12\x04\x9e\x05\x02\x0e\n\r\n\
    \x05\x04\x07\x02\x06\x01\x12\x04\x9e\x05\x0f\x1b\n\r\n\x05\x04\x07\x02\
    \x06\x03\x12\x04\x9e\x05\x1e\x1f\n\x0b\n\x03\x04\x07\t\x12\x04\xa0\x05\
    \x02\r\n\x0c\n\x04\x04\x07\t\0\x12\x04\xa0\x05\x0b\x0c\n\r\n\x05\x04\x07\
    \t\0\x01\x12\x04\xa0\x05\x0b\x0c\n\r\n\x05\x04\x07\t\0\x02\x12\x04\xa0\
    \x05\x0b\x0c\nK\n\x02\x04\x08\x12\x06\xa4\x05\0\xc2\x05\x01\x1a=\x20Meta\
    data\x20output\x20(i.e.,\x20non-Tensor)\x20for\x20a\x20single\x20Run()\
    \x20call.\n\n\x0b\n\x03\x04\x08\x01\x12\x04\xa4\x05\x08\x13\n\xbb\x01\n\
    \x04\x04\x08\x02\0\x12\x04\xa8\x05\x02\x1b\x1a\xac\x01\x20Statistics\x20\
    traced\x20for\x20this\x20step.\x20Populated\x20if\x20tracing\x20is\x20tu\
    rned\x20on\x20via\x20the\n\x20\"RunOptions\"\x20proto.\n\x20EXPERIMENTAL\
    :\x20The\x20format\x20and\x20set\x20of\x20events\x20may\x20change\x20in\
    \x20future\x20versions.\n\n\r\n\x05\x04\x08\x02\0\x06\x12\x04\xa8\x05\
    \x02\x0b\n\r\n\x05\x04\x08\x02\0\x01\x12\x04\xa8\x05\x0c\x16\n\r\n\x05\
    \x04\x08\x02\0\x03\x12\x04\xa8\x05\x19\x1a\nK\n\x04\x04\x08\x02\x01\x12\
    \x04\xab\x05\x02\x1e\x1a=\x20The\x20cost\x20graph\x20for\x20the\x20compu\
    tation\x20defined\x20by\x20the\x20run\x20call.\n\n\r\n\x05\x04\x08\x02\
    \x01\x06\x12\x04\xab\x05\x02\x0e\n\r\n\x05\x04\x08\x02\x01\x01\x12\x04\
    \xab\x05\x0f\x19\n\r\n\x05\x04\x08\x02\x01\x03\x12\x04\xab\x05\x1c\x1d\n\
    ?\n\x04\x04\x08\x02\x02\x12\x04\xae\x05\x02)\x1a1\x20Graphs\x20of\x20the\
    \x20partitions\x20executed\x20by\x20executors.\n\n\r\n\x05\x04\x08\x02\
    \x02\x04\x12\x04\xae\x05\x02\n\n\r\n\x05\x04\x08\x02\x02\x06\x12\x04\xae\
    \x05\x0b\x13\n\r\n\x05\x04\x08\x02\x02\x01\x12\x04\xae\x05\x14$\n\r\n\
    \x05\x04\x08\x02\x02\x03\x12\x04\xae\x05'(\n\x0e\n\x04\x04\x08\x03\0\x12\
    \x06\xb0\x05\x02\xb6\x05\x03\n\r\n\x05\x04\x08\x03\0\x01\x12\x04\xb0\x05\
    \n\x18\nW\n\x06\x04\x08\x03\0\x02\0\x12\x04\xb2\x05\x04+\x1aG\x20TODO(na\
    reshmodi):\x20Include\x20some\x20sort\x20of\x20function/cache-key\x20ide\
    ntifier?\n\n\x0f\n\x07\x04\x08\x03\0\x02\0\x04\x12\x04\xb2\x05\x04\x0c\n\
    \x0f\n\x07\x04\x08\x03\0\x02\0\x06\x12\x04\xb2\x05\r\x15\n\x0f\n\x07\x04\
    \x08\x03\0\x02\0\x01\x12\x04\xb2\x05\x16&\n\x0f\n\x07\x04\x08\x03\0\x02\
    \0\x03\x12\x04\xb2\x05)*\n\x0e\n\x06\x04\x08\x03\0\x02\x01\x12\x04\xb4\
    \x05\x04(\n\x0f\n\x07\x04\x08\x03\0\x02\x01\x06\x12\x04\xb4\x05\x04\x0c\
    \n\x0f\n\x07\x04\x08\x03\0\x02\x01\x01\x12\x04\xb4\x05\r#\n\x0f\n\x07\
    \x04\x08\x03\0\x02\x01\x03\x12\x04\xb4\x05&'\n\x0e\n\x06\x04\x08\x03\0\
    \x02\x02\x12\x04\xb5\x05\x04)\n\x0f\n\x07\x04\x08\x03\0\x02\x02\x06\x12\
    \x04\xb5\x05\x04\x0c\n\x0f\n\x07\x04\x08\x03\0\x02\x02\x01\x12\x04\xb5\
    \x05\r$\n\x0f\n\x07\x04\x08\x03\0\x02\x02\x03\x12\x04\xb5\x05'(\n\xb8\
    \x05\n\x04\x04\x08\x02\x03\x12\x04\xc1\x05\x02.\x1a\xa9\x05\x20This\x20i\
    s\x20only\x20populated\x20for\x20graphs\x20that\x20are\x20run\x20as\x20f\
    unctions\x20in\x20TensorFlow\n\x20V2.\x20There\x20will\x20be\x20an\x20en\
    try\x20below\x20for\x20each\x20function\x20that\x20is\x20traced.\n\x20Th\
    e\x20main\x20use\x20cases\x20of\x20the\x20post_optimization_graph\x20and\
    \x20the\x20partition_graphs\n\x20is\x20to\x20give\x20the\x20caller\x20in\
    sight\x20into\x20the\x20graphs\x20that\x20were\x20actually\x20run\x20by\
    \x20the\n\x20runtime.\x20Additional\x20information\x20(such\x20as\x20tho\
    se\x20in\x20step_stats)\x20will\x20match\n\x20these\x20graphs.\n\x20We\
    \x20also\x20include\x20the\x20pre_optimization_graph\x20since\x20it\x20i\
    s\x20usually\x20easier\x20to\n\x20read,\x20and\x20is\x20helpful\x20in\
    \x20situations\x20where\x20the\x20caller\x20wants\x20to\x20get\x20a\x20h\
    igh\n\x20level\x20idea\x20of\x20what\x20the\x20built\x20graph\x20looks\
    \x20like\x20(since\x20the\x20various\x20graph\n\x20optimization\x20passe\
    s\x20might\x20change\x20the\x20structure\x20of\x20the\x20graph\x20signif\
    icantly).\n\n\r\n\x05\x04\x08\x02\x03\x04\x12\x04\xc1\x05\x02\n\n\r\n\
    \x05\x04\x08\x02\x03\x06\x12\x04\xc1\x05\x0b\x19\n\r\n\x05\x04\x08\x02\
    \x03\x01\x12\x04\xc1\x05\x1a)\n\r\n\x05\x04\x08\x02\x03\x03\x12\x04\xc1\
    \x05,-\nI\n\x02\x04\t\x12\x06\xc5\x05\0\xcd\x05\x01\x1a;\x20Defines\x20a\
    \x20connection\x20between\x20two\x20tensors\x20in\x20a\x20`GraphDef`.\n\
    \n\x0b\n\x03\x04\t\x01\x12\x04\xc5\x05\x08\x18\nq\n\x04\x04\t\x02\0\x12\
    \x04\xc8\x05\x02\x19\x1ac\x20A\x20tensor\x20name.\x20The\x20value\x20of\
    \x20this\x20tensor\x20will\x20be\x20substituted\x20for\n\x20the\x20tenso\
    r\x20named\x20in\x20`to_tensor`.\n\n\r\n\x05\x04\t\x02\0\x05\x12\x04\xc8\
    \x05\x02\x08\n\r\n\x05\x04\t\x02\0\x01\x12\x04\xc8\x05\t\x14\n\r\n\x05\
    \x04\t\x02\0\x03\x12\x04\xc8\x05\x17\x18\ny\n\x04\x04\t\x02\x01\x12\x04\
    \xcc\x05\x02\x17\x1ak\x20A\x20tensor\x20name.\x20The\x20value\x20of\x20t\
    his\x20tensor\x20will\x20be\x20bound\x20to\x20the\n\x20value\x20of\x20th\
    e\x20tensor\x20named\x20in\x20`from_tensor`.\n\n\r\n\x05\x04\t\x02\x01\
    \x05\x12\x04\xcc\x05\x02\x08\n\r\n\x05\x04\t\x02\x01\x01\x12\x04\xcc\x05\
    \t\x12\n\r\n\x05\x04\t\x02\x01\x03\x12\x04\xcc\x05\x15\x16\n\xa9\x01\n\
    \x02\x04\n\x12\x06\xd3\x05\0\xa7\x06\x01\x1a\x9a\x01\x20Defines\x20a\x20\
    subgraph\x20in\x20another\x20`GraphDef`\x20as\x20a\x20set\x20of\x20feed\
    \x20points\x20and\x20nodes\n\x20to\x20be\x20fetched\x20or\x20executed.\n\
    \n\x20Compare\x20with\x20the\x20arguments\x20to\x20`Session::Run()`.\n\n\
    \x0b\n\x03\x04\n\x01\x12\x04\xd3\x05\x08\x17\nU\n\x04\x04\n\x02\0\x12\
    \x04\xd5\x05\x02\x1b\x1aG\x20Tensors\x20to\x20be\x20fed\x20in\x20the\x20\
    callable.\x20Each\x20feed\x20is\x20the\x20name\x20of\x20a\x20tensor.\n\n\
    \r\n\x05\x04\n\x02\0\x04\x12\x04\xd5\x05\x02\n\n\r\n\x05\x04\n\x02\0\x05\
    \x12\x04\xd5\x05\x0b\x11\n\r\n\x05\x04\n\x02\0\x01\x12\x04\xd5\x05\x12\
    \x16\n\r\n\x05\x04\n\x02\0\x03\x12\x04\xd5\x05\x19\x1a\n\xe2\x01\n\x04\
    \x04\n\x02\x01\x12\x04\xda\x05\x02\x1c\x1a\xd3\x01\x20Fetches.\x20A\x20l\
    ist\x20of\x20tensor\x20names.\x20The\x20caller\x20of\x20the\x20callable\
    \x20expects\x20a\n\x20tensor\x20to\x20be\x20returned\x20for\x20each\x20f\
    etch[i]\x20(see\x20RunStepResponse.tensor).\x20The\n\x20order\x20of\x20s\
    pecified\x20fetches\x20does\x20not\x20change\x20the\x20execution\x20orde\
    r.\n\n\r\n\x05\x04\n\x02\x01\x04\x12\x04\xda\x05\x02\n\n\r\n\x05\x04\n\
    \x02\x01\x05\x12\x04\xda\x05\x0b\x11\n\r\n\x05\x04\n\x02\x01\x01\x12\x04\
    \xda\x05\x12\x17\n\r\n\x05\x04\n\x02\x01\x03\x12\x04\xda\x05\x1a\x1b\n\
    \x88\x01\n\x04\x04\n\x02\x02\x12\x04\xde\x05\x02\x1d\x1az\x20Target\x20N\
    odes.\x20A\x20list\x20of\x20node\x20names.\x20The\x20named\x20nodes\x20w\
    ill\x20be\x20run\x20by\x20the\n\x20callable\x20but\x20their\x20outputs\
    \x20will\x20not\x20be\x20returned.\n\n\r\n\x05\x04\n\x02\x02\x04\x12\x04\
    \xde\x05\x02\n\n\r\n\x05\x04\n\x02\x02\x05\x12\x04\xde\x05\x0b\x11\n\r\n\
    \x05\x04\n\x02\x02\x01\x12\x04\xde\x05\x12\x18\n\r\n\x05\x04\n\x02\x02\
    \x03\x12\x04\xde\x05\x1b\x1c\n9\n\x04\x04\n\x02\x03\x12\x04\xe1\x05\x02\
    \x1d\x1a+\x20Options\x20that\x20will\x20be\x20applied\x20to\x20each\x20r\
    un.\n\n\r\n\x05\x04\n\x02\x03\x06\x12\x04\xe1\x05\x02\x0c\n\r\n\x05\x04\
    \n\x02\x03\x01\x12\x04\xe1\x05\r\x18\n\r\n\x05\x04\n\x02\x03\x03\x12\x04\
    \xe1\x05\x1b\x1c\n\xb0\x01\n\x04\x04\n\x02\x04\x12\x04\xe6\x05\x022\x1a\
    \xa1\x01\x20Tensors\x20to\x20be\x20connected\x20in\x20the\x20callable.\
    \x20Each\x20TensorConnection\x20denotes\n\x20a\x20pair\x20of\x20tensors\
    \x20in\x20the\x20graph,\x20between\x20which\x20an\x20edge\x20will\x20be\
    \x20created\n\x20in\x20the\x20callable.\n\n\r\n\x05\x04\n\x02\x04\x04\
    \x12\x04\xe6\x05\x02\n\n\r\n\x05\x04\n\x02\x04\x06\x12\x04\xe6\x05\x0b\
    \x1b\n\r\n\x05\x04\n\x02\x04\x01\x12\x04\xe6\x05\x1c-\n\r\n\x05\x04\n\
    \x02\x04\x03\x12\x04\xe6\x0501\n\xb7\r\n\x04\x04\n\x02\x05\x12\x04\x97\
    \x06\x02'\x1a\xa8\r\x20The\x20Tensor\x20objects\x20fed\x20in\x20the\x20c\
    allable\x20and\x20fetched\x20from\x20the\x20callable\n\x20are\x20expecte\
    d\x20to\x20be\x20backed\x20by\x20host\x20(CPU)\x20memory\x20by\x20defaul\
    t.\n\n\x20The\x20options\x20below\x20allow\x20changing\x20that\x20-\x20f\
    eeding\x20tensors\x20backed\x20by\n\x20device\x20memory,\x20or\x20return\
    ing\x20tensors\x20that\x20are\x20backed\x20by\x20device\x20memory.\n\n\
    \x20The\x20maps\x20below\x20map\x20the\x20name\x20of\x20a\x20feed/fetch\
    \x20tensor\x20(which\x20appears\x20in\n\x20'feed'\x20or\x20'fetch'\x20fi\
    elds\x20above),\x20to\x20the\x20fully\x20qualified\x20name\x20of\x20the\
    \x20device\n\x20owning\x20the\x20memory\x20backing\x20the\x20contents\
    \x20of\x20the\x20tensor.\n\n\x20For\x20example,\x20creating\x20a\x20call\
    able\x20with\x20the\x20following\x20options:\n\n\x20CallableOptions\x20{\
    \n\x20\x20\x20feed:\x20\"a:0\"\n\x20\x20\x20feed:\x20\"b:0\"\n\n\x20\x20\
    \x20fetch:\x20\"x:0\"\n\x20\x20\x20fetch:\x20\"y:0\"\n\n\x20\x20\x20feed\
    _devices:\x20{\n\x20\x20\x20\x20\x20\"a:0\":\x20\"/job:localhost/replica\
    :0/task:0/device:GPU:0\"\n\x20\x20\x20}\n\n\x20\x20\x20fetch_devices:\
    \x20{\n\x20\x20\x20\x20\x20\"y:0\":\x20\"/job:localhost/replica:0/task:0\
    /device:GPU:0\"\n\x20\x20}\n\x20}\n\n\x20means\x20that\x20the\x20Callabl\
    e\x20expects:\n\x20-\x20The\x20first\x20argument\x20(\"a:0\")\x20is\x20a\
    \x20Tensor\x20backed\x20by\x20GPU\x20memory.\n\x20-\x20The\x20second\x20\
    argument\x20(\"b:0\")\x20is\x20a\x20Tensor\x20backed\x20by\x20host\x20me\
    mory.\n\x20and\x20of\x20its\x20return\x20values:\n\x20-\x20The\x20first\
    \x20output\x20(\"x:0\")\x20will\x20be\x20backed\x20by\x20host\x20memory.\
    \n\x20-\x20The\x20second\x20output\x20(\"y:0\")\x20will\x20be\x20backed\
    \x20by\x20GPU\x20memory.\n\n\x20FEEDS:\n\x20It\x20is\x20the\x20responsib\
    ility\x20of\x20the\x20caller\x20to\x20ensure\x20that\x20the\x20memory\
    \x20of\x20the\x20fed\n\x20tensors\x20will\x20be\x20correctly\x20initiali\
    zed\x20and\x20synchronized\x20before\x20it\x20is\n\x20accessed\x20by\x20\
    operations\x20executed\x20during\x20the\x20call\x20to\x20Session::RunCal\
    lable().\n\n\x20This\x20is\x20typically\x20ensured\x20by\x20using\x20the\
    \x20TensorFlow\x20memory\x20allocators\n\x20(Device::GetAllocator())\x20\
    to\x20create\x20the\x20Tensor\x20to\x20be\x20fed.\n\n\x20Alternatively,\
    \x20for\x20CUDA-enabled\x20GPU\x20devices,\x20this\x20typically\x20means\
    \x20that\x20the\n\x20operation\x20that\x20produced\x20the\x20contents\
    \x20of\x20the\x20tensor\x20has\x20completed,\x20i.e.,\x20the\n\x20CUDA\
    \x20stream\x20has\x20been\x20synchronized\x20(e.g.,\x20via\x20cuCtxSynch\
    ronize()\x20or\n\x20cuStreamSynchronize()).\n\n\r\n\x05\x04\n\x02\x05\
    \x06\x12\x04\x97\x06\x02\x15\n\r\n\x05\x04\n\x02\x05\x01\x12\x04\x97\x06\
    \x16\"\n\r\n\x05\x04\n\x02\x05\x03\x12\x04\x97\x06%&\n\x0c\n\x04\x04\n\
    \x02\x06\x12\x04\x98\x06\x02(\n\r\n\x05\x04\n\x02\x06\x06\x12\x04\x98\
    \x06\x02\x15\n\r\n\x05\x04\n\x02\x06\x01\x12\x04\x98\x06\x16#\n\r\n\x05\
    \x04\n\x02\x06\x03\x12\x04\x98\x06&'\n\xf5\x04\n\x04\x04\n\x02\x07\x12\
    \x04\xa4\x06\x02\x1b\x1a\xe6\x04\x20By\x20default,\x20RunCallable()\x20w\
    ill\x20synchronize\x20the\x20GPU\x20stream\x20before\x20returning\n\x20f\
    etched\x20tensors\x20on\x20a\x20GPU\x20device,\x20to\x20ensure\x20that\
    \x20the\x20values\x20in\x20those\x20tensors\n\x20have\x20been\x20produce\
    d.\x20This\x20simplifies\x20interacting\x20with\x20the\x20tensors,\x20bu\
    t\n\x20potentially\x20incurs\x20a\x20performance\x20hit.\n\n\x20If\x20th\
    is\x20options\x20is\x20set\x20to\x20true,\x20the\x20caller\x20is\x20resp\
    onsible\x20for\x20ensuring\n\x20that\x20the\x20values\x20in\x20the\x20fe\
    tched\x20tensors\x20have\x20been\x20produced\x20before\x20they\x20are\n\
    \x20used.\x20The\x20caller\x20can\x20do\x20this\x20by\x20invoking\x20`De\
    vice::Sync()`\x20on\x20the\x20underlying\n\x20device(s),\x20or\x20by\x20\
    feeding\x20the\x20tensors\x20back\x20to\x20the\x20same\x20Session\x20usi\
    ng\n\x20`feed_devices`\x20with\x20the\x20same\x20corresponding\x20device\
    \x20name.\n\n\r\n\x05\x04\n\x02\x07\x05\x12\x04\xa4\x06\x02\x06\n\r\n\
    \x05\x04\n\x02\x07\x01\x12\x04\xa4\x06\x07\x16\n\r\n\x05\x04\n\x02\x07\
    \x03\x12\x04\xa4\x06\x19\x1ab\x06proto3\
";

// Lazily-initialized, cached copy of the parsed descriptor below.
static mut file_descriptor_proto_lazy: ::protobuf::lazy::Lazy<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::lazy::Lazy::INIT;

// Parses the serialized `FileDescriptorProto` bytes embedded above.
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
    ::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}

/// Returns the descriptor for `tensorflow/core/protobuf/config.proto`,
/// parsing the embedded bytes on first access and caching the result.
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
    unsafe {
        file_descriptor_proto_lazy.get(|| {
            parse_descriptor_proto()
        })
    }
}
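
// A minimal, hypothetical usage sketch (not emitted by rust-protobuf itself):
// the descriptor returned by `file_descriptor_proto()` can be walked with the
// ordinary protobuf 2.x runtime getters, for example to enumerate the message
// types declared in config.proto.
fn list_config_proto_messages() {
    let descriptor = file_descriptor_proto();
    for message in descriptor.get_message_type() {
        // Prints e.g. "GPUOptions", "ConfigProto", "RunOptions", ...
        println!("{}", message.get_name());
    }
}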