// kn_graph/onnx/proto.rs
// Generated ONNX protobuf bindings (prost).
/// Attributes
///
/// A named attribute containing either singular float, integer, string, graph,
/// and tensor values, or repeated float, integer, string, graph, and tensor values.
/// An AttributeProto MUST contain the name field, and *only one* of the
/// following content fields, effectively enforcing a C/C++ union equivalent.
///
/// NOTE(review): this struct appears to be `prost-build` output mirroring
/// onnx.proto (see the `::prost::Message` derive and `#[prost(...)]` tags);
/// prefer regenerating from the .proto over hand-editing field tags.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AttributeProto {
    /// The name field MUST be present for this version of the IR.
    ///
    /// namespace Attribute
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
    /// In this case, this AttributeProto does not contain data, and it's a reference of attribute
    /// in parent scope.
    /// NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
    #[prost(string, tag = "21")]
    pub ref_attr_name: ::prost::alloc::string::String,
    /// A human-readable documentation for this attribute. Markdown is allowed.
    #[prost(string, tag = "13")]
    pub doc_string: ::prost::alloc::string::String,
    /// The type field MUST be present for this version of the IR.
    /// For 0.0.1 versions of the IR, this field was not defined, and
    /// implementations needed to use has_field heuristics to determine
    /// which value field was in use.  For IR_VERSION 0.0.2 or later, this
    /// field MUST be set and match the f|i|s|t|... field in use.  This
    /// change was made to accommodate proto3 implementations.
    ///
    /// discriminator that indicates which field below is in use
    #[prost(enumeration = "attribute_proto::AttributeType", tag = "20")]
    pub r#type: i32,
    /// Exactly ONE of the following fields must be present for this version of the IR
    ///
    /// float
    #[prost(float, tag = "2")]
    pub f: f32,
    /// int
    #[prost(int64, tag = "3")]
    pub i: i64,
    /// UTF-8 string
    #[prost(bytes = "vec", tag = "4")]
    pub s: ::prost::alloc::vec::Vec<u8>,
    /// tensor value
    #[prost(message, optional, tag = "5")]
    pub t: ::core::option::Option<TensorProto>,
    /// graph
    #[prost(message, optional, tag = "6")]
    pub g: ::core::option::Option<GraphProto>,
    /// sparse tensor value
    #[prost(message, optional, tag = "22")]
    pub sparse_tensor: ::core::option::Option<SparseTensorProto>,
    /// Do not use field below, it's deprecated.
    /// optional ValueProto v = 12;         // value - subsumes everything but graph
    ///
    /// type proto
    #[prost(message, optional, tag = "14")]
    pub tp: ::core::option::Option<TypeProto>,
    /// list of floats
    #[prost(float, repeated, tag = "7")]
    pub floats: ::prost::alloc::vec::Vec<f32>,
    /// list of ints
    #[prost(int64, repeated, tag = "8")]
    pub ints: ::prost::alloc::vec::Vec<i64>,
    /// list of UTF-8 strings
    #[prost(bytes = "vec", repeated, tag = "9")]
    pub strings: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// list of tensors
    #[prost(message, repeated, tag = "10")]
    pub tensors: ::prost::alloc::vec::Vec<TensorProto>,
    /// list of graph
    #[prost(message, repeated, tag = "11")]
    pub graphs: ::prost::alloc::vec::Vec<GraphProto>,
    /// list of sparse tensors
    #[prost(message, repeated, tag = "23")]
    pub sparse_tensors: ::prost::alloc::vec::Vec<SparseTensorProto>,
    /// list of type protos
    #[prost(message, repeated, tag = "15")]
    pub type_protos: ::prost::alloc::vec::Vec<TypeProto>,
}
82/// Nested message and enum types in `AttributeProto`.
83pub mod attribute_proto {
84    /// Note: this enum is structurally identical to the OpSchema::AttrType
85    /// enum defined in schema.h.  If you rev one, you likely need to rev the other.
86    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
87    #[repr(i32)]
88    pub enum AttributeType {
89        Undefined = 0,
90        Float = 1,
91        Int = 2,
92        String = 3,
93        Tensor = 4,
94        Graph = 5,
95        SparseTensor = 11,
96        TypeProto = 13,
97        Floats = 6,
98        Ints = 7,
99        Strings = 8,
100        Tensors = 9,
101        Graphs = 10,
102        SparseTensors = 12,
103        TypeProtos = 14,
104    }
105    impl AttributeType {
106        /// String value of the enum field names used in the ProtoBuf definition.
107        ///
108        /// The values are not transformed in any way and thus are considered stable
109        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
110        pub fn as_str_name(&self) -> &'static str {
111            match self {
112                AttributeType::Undefined => "UNDEFINED",
113                AttributeType::Float => "FLOAT",
114                AttributeType::Int => "INT",
115                AttributeType::String => "STRING",
116                AttributeType::Tensor => "TENSOR",
117                AttributeType::Graph => "GRAPH",
118                AttributeType::SparseTensor => "SPARSE_TENSOR",
119                AttributeType::TypeProto => "TYPE_PROTO",
120                AttributeType::Floats => "FLOATS",
121                AttributeType::Ints => "INTS",
122                AttributeType::Strings => "STRINGS",
123                AttributeType::Tensors => "TENSORS",
124                AttributeType::Graphs => "GRAPHS",
125                AttributeType::SparseTensors => "SPARSE_TENSORS",
126                AttributeType::TypeProtos => "TYPE_PROTOS",
127            }
128        }
129        /// Creates an enum from field names used in the ProtoBuf definition.
130        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
131            match value {
132                "UNDEFINED" => Some(Self::Undefined),
133                "FLOAT" => Some(Self::Float),
134                "INT" => Some(Self::Int),
135                "STRING" => Some(Self::String),
136                "TENSOR" => Some(Self::Tensor),
137                "GRAPH" => Some(Self::Graph),
138                "SPARSE_TENSOR" => Some(Self::SparseTensor),
139                "TYPE_PROTO" => Some(Self::TypeProto),
140                "FLOATS" => Some(Self::Floats),
141                "INTS" => Some(Self::Ints),
142                "STRINGS" => Some(Self::Strings),
143                "TENSORS" => Some(Self::Tensors),
144                "GRAPHS" => Some(Self::Graphs),
145                "SPARSE_TENSORS" => Some(Self::SparseTensors),
146                "TYPE_PROTOS" => Some(Self::TypeProtos),
147                _ => None,
148            }
149        }
150    }
151}
/// Defines information on value, including the name, the type, and
/// the shape of the value.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ValueInfoProto {
    /// This field MUST be present in this version of the IR.
    ///
    /// namespace Value
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// This field MUST be present in this version of the IR for
    /// inputs and outputs of the top-level graph.
    #[prost(message, optional, tag = "2")]
    pub r#type: ::core::option::Option<TypeProto>,
    /// A human-readable documentation for this value. Markdown is allowed.
    #[prost(string, tag = "3")]
    pub doc_string: ::prost::alloc::string::String,
}
/// Nodes
///
/// Computation graphs are made up of a DAG of nodes, which represent what is
/// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
///
/// For example, it can be a node of type "Conv" that takes in an image, a filter
/// tensor and a bias tensor, and produces the convolved output.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NodeProto {
    /// namespace Value
    #[prost(string, repeated, tag = "1")]
    pub input: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// namespace Value
    #[prost(string, repeated, tag = "2")]
    pub output: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// An optional identifier for this node in a graph.
    /// This field MAY be absent in this version of the IR.
    ///
    /// namespace Node
    #[prost(string, tag = "3")]
    pub name: ::prost::alloc::string::String,
    /// The symbolic identifier of the Operator to execute.
    ///
    /// namespace Operator
    #[prost(string, tag = "4")]
    pub op_type: ::prost::alloc::string::String,
    /// The domain of the OperatorSet that specifies the operator named by op_type.
    ///
    /// namespace Domain
    #[prost(string, tag = "7")]
    pub domain: ::prost::alloc::string::String,
    /// Additional named attributes.
    #[prost(message, repeated, tag = "5")]
    pub attribute: ::prost::alloc::vec::Vec<AttributeProto>,
    /// A human-readable documentation for this node. Markdown is allowed.
    #[prost(string, tag = "6")]
    pub doc_string: ::prost::alloc::string::String,
}
/// Training information
/// TrainingInfoProto stores information for training a model.
/// In particular, this defines two functionalities: an initialization-step
/// and a training-algorithm-step. Initialization resets the model
/// back to its original state as if no training has been performed.
/// Training algorithm improves the model based on input data.
///
/// The semantics of the initialization-step is that the initializers
/// in ModelProto.graph and in TrainingInfoProto.algorithm are first
/// initialized as specified by the initializers in the graph, and then
/// updated by the "initialization_binding" in every instance in
/// ModelProto.training_info.
///
/// The field "algorithm" defines a computation graph which represents a
/// training algorithm's step. After the execution of a
/// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
/// may be immediately updated. If the targeted training algorithm contains
/// consecutive update steps (such as block coordinate descent methods),
/// the user needs to create a TrainingInfoProto for each step.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TrainingInfoProto {
    /// This field describes a graph to compute the initial tensors
    /// upon starting the training process. Initialization graph has no input
    /// and can have multiple outputs. Usually, trainable tensors in neural
    /// networks are randomly initialized. To achieve that, for each tensor,
    /// the user can put a random number operator such as RandomNormal or
    /// RandomUniform in TrainingInfoProto.initialization.node and assign its
    /// random output to the specific tensor using "initialization_binding".
    /// This graph can also set the initializers in "algorithm" in the same
    /// TrainingInfoProto; a use case is resetting the number of training
    /// iteration to zero.
    ///
    /// By default, this field is an empty graph and its evaluation does not
    /// produce any output. Thus, no initializer would be changed by default.
    #[prost(message, optional, tag = "1")]
    pub initialization: ::core::option::Option<GraphProto>,
    /// This field represents a training algorithm step. Given required inputs,
    /// it computes outputs to update initializers in its own or inference graph's
    /// initializer lists. In general, this field contains loss node, gradient node,
    /// optimizer node, increment of iteration count.
    ///
    /// An execution of the training algorithm step is performed by executing the
    /// graph obtained by combining the inference graph (namely "ModelProto.graph")
    /// and the "algorithm" graph. That is, the actual
    /// input/initializer/output/node/value_info/sparse_initializer list of
    /// the training graph is the concatenation of
    /// "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
    /// and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
    /// in that order. This combined graph must satisfy the normal ONNX conditions.
    /// Now, let's provide a visualization of graph combination for clarity.
    /// Let the inference graph (i.e., "ModelProto.graph") be
    ///     tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
    /// and the "algorithm" graph be
    ///     tensor_d -> Add -> tensor_e
    /// The combination process results in
    ///     tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
    ///
    /// Notice that an input of a node in the "algorithm" graph may reference the
    /// output of a node in the inference graph (but not the other way round). Also, inference
    /// node cannot reference inputs of "algorithm". With these restrictions, inference graph
    /// can always be run independently without training information.
    ///
    /// By default, this field is an empty graph and its evaluation does not
    /// produce any output. Evaluating the default training step never
    /// updates any initializers.
    #[prost(message, optional, tag = "2")]
    pub algorithm: ::core::option::Option<GraphProto>,
    /// This field specifies the bindings from the outputs of "initialization" to
    /// some initializers in "ModelProto.graph.initializer" and
    /// the "algorithm.initializer" in the same TrainingInfoProto.
    /// See "update_binding" below for details.
    ///
    /// By default, this field is empty and no initializer would be changed
    /// by the execution of "initialization".
    #[prost(message, repeated, tag = "3")]
    pub initialization_binding: ::prost::alloc::vec::Vec<StringStringEntryProto>,
    /// Gradient-based training is usually an iterative procedure. In one gradient
    /// descent iteration, we apply
    ///
    /// x = x - r * g
    ///
    /// where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
    /// gradient of "x" with respect to a chosen loss. To avoid adding assignments
    /// into the training graph, we split the update equation into
    ///
    /// y = x - r * g
    /// x = y
    ///
    /// The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
    /// tell that "y" should be assigned to "x", the field "update_binding" may
    /// contain a key-value pair of strings, "x" (key of StringStringEntryProto)
    /// and "y" (value of StringStringEntryProto).
    /// For a neural network with multiple trainable (mutable) tensors, there can
    /// be multiple key-value pairs in "update_binding".
    ///
    /// The initializers appearing as keys in "update_binding" are considered
    /// mutable variables. This implies some behaviors
    /// as described below.
    ///
    ///   1. We have only unique keys in all "update_binding"s so that two
    ///      variables may not have the same name. This ensures that one
    ///      variable is assigned up to once.
    ///   2. The keys must appear in names of "ModelProto.graph.initializer" or
    ///      "TrainingInfoProto.algorithm.initializer".
    ///   3. The values must be output names of "algorithm" or "ModelProto.graph.output".
    ///   4. Mutable variables are initialized to the value specified by the
    ///      corresponding initializer, and then potentially updated by
    ///      "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
    ///
    /// This field usually contains names of trainable tensors
    /// (in ModelProto.graph), optimizer states such as momentums in advanced
    /// stochastic gradient methods (in TrainingInfoProto.graph),
    /// and number of training iterations (in TrainingInfoProto.graph).
    ///
    /// By default, this field is empty and no initializer would be changed
    /// by the execution of "algorithm".
    #[prost(message, repeated, tag = "4")]
    pub update_binding: ::prost::alloc::vec::Vec<StringStringEntryProto>,
}
/// Models
///
/// ModelProto is a top-level file/container format for bundling a ML model and
/// associating its computation graph with metadata.
///
/// The semantics of the model are described by the associated GraphProto's.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ModelProto {
    /// The version of the IR this model targets. See Version enum above.
    /// This field MUST be present.
    #[prost(int64, tag = "1")]
    pub ir_version: i64,
    /// The OperatorSets this model relies on.
    /// All ModelProtos MUST have at least one entry that
    /// specifies which version of the ONNX OperatorSet is
    /// being imported.
    ///
    /// All nodes in the ModelProto's graph will bind against the operator
    /// with the same-domain/same-op_type operator with the HIGHEST version
    /// in the referenced operator sets.
    #[prost(message, repeated, tag = "8")]
    pub opset_import: ::prost::alloc::vec::Vec<OperatorSetIdProto>,
    /// The name of the framework or tool used to generate this model.
    /// This field SHOULD be present to indicate which implementation/tool/framework
    /// emitted the model.
    #[prost(string, tag = "2")]
    pub producer_name: ::prost::alloc::string::String,
    /// The version of the framework or tool used to generate this model.
    /// This field SHOULD be present to indicate which implementation/tool/framework
    /// emitted the model.
    #[prost(string, tag = "3")]
    pub producer_version: ::prost::alloc::string::String,
    /// Domain name of the model.
    /// We use reverse domain names as name space indicators. For example:
    /// `com.facebook.fair` or `com.microsoft.cognitiveservices`
    ///
    /// Together with `model_version` and GraphProto.name, this forms the unique identity of
    /// the graph.
    #[prost(string, tag = "4")]
    pub domain: ::prost::alloc::string::String,
    /// The version of the graph encoded. See Version enum below.
    #[prost(int64, tag = "5")]
    pub model_version: i64,
    /// A human-readable documentation for this model. Markdown is allowed.
    #[prost(string, tag = "6")]
    pub doc_string: ::prost::alloc::string::String,
    /// The parameterized graph that is evaluated to execute the model.
    #[prost(message, optional, tag = "7")]
    pub graph: ::core::option::Option<GraphProto>,
    /// Named metadata values; keys should be distinct.
    #[prost(message, repeated, tag = "14")]
    pub metadata_props: ::prost::alloc::vec::Vec<StringStringEntryProto>,
    /// Training-specific information. Sequentially executing all stored
    /// `TrainingInfoProto.algorithm`s and assigning their outputs following
    /// the corresponding `TrainingInfoProto.update_binding`s is one training
    /// iteration. Similarly, to initialize the model
    /// (as if training hasn't happened), the user should sequentially execute
    /// all stored `TrainingInfoProto.initialization`s and assign their outputs
    /// using `TrainingInfoProto.initialization_binding`s.
    ///
    /// If this field is empty, the training behavior of the model is undefined.
    #[prost(message, repeated, tag = "20")]
    pub training_info: ::prost::alloc::vec::Vec<TrainingInfoProto>,
    /// A list of function protos local to the model.
    ///
    /// Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
    /// In case of any conflicts the behavior (whether the model local functions are given higher priority,
    /// or standard operator sets are given higher priority or this is treated as error) is defined by
    /// the runtimes.
    ///
    /// The operator sets imported by FunctionProto should be compatible with the ones
    /// imported by ModelProto and other model local FunctionProtos.
    /// Example, if same operator set say 'A' is imported by a FunctionProto and ModelProto
    /// or by 2 FunctionProtos then versions for the operator set may be different but,
    /// the operator schema returned for op_type, domain, version combination
    /// for both the versions should be same for every node in the function body.
    ///
    /// One FunctionProto can reference other FunctionProto in the model, however, recursive reference
    /// is not allowed.
    #[prost(message, repeated, tag = "25")]
    pub functions: ::prost::alloc::vec::Vec<FunctionProto>,
}
/// StringStringEntryProto follows the pattern for cross-proto-version maps.
/// See <https://developers.google.com/protocol-buffers/docs/proto3#maps>
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct StringStringEntryProto {
    /// The key of the map entry.
    #[prost(string, tag = "1")]
    pub key: ::prost::alloc::string::String,
    /// The value associated with `key`.
    #[prost(string, tag = "2")]
    pub value: ::prost::alloc::string::String,
}
/// Associates a named tensor with a set of key-value annotations
/// (e.g. quantization parameter tensors).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TensorAnnotation {
    /// Name of the tensor the annotations below refer to.
    #[prost(string, tag = "1")]
    pub tensor_name: ::prost::alloc::string::String,
    /// <key, value> pairs to annotate tensor specified by <tensor_name> above.
    /// The keys used in the mapping below must be pre-defined in ONNX spec.
    /// For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
    /// quantization parameter keys.
    #[prost(message, repeated, tag = "2")]
    pub quant_parameter_tensor_names: ::prost::alloc::vec::Vec<StringStringEntryProto>,
}
/// Graphs
///
/// A graph defines the computational logic of a model and is comprised of a parameterized
/// list of nodes that form a directed acyclic graph based on their inputs and outputs.
/// This is the equivalent of the "network" or "graph" in many deep learning
/// frameworks.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct GraphProto {
    /// The nodes in the graph, sorted topologically.
    #[prost(message, repeated, tag = "1")]
    pub node: ::prost::alloc::vec::Vec<NodeProto>,
    /// The name of the graph.
    ///
    /// namespace Graph
    #[prost(string, tag = "2")]
    pub name: ::prost::alloc::string::String,
    /// A list of named tensor values, used to specify constant inputs of the graph.
    /// Each initializer (both TensorProto as well SparseTensorProto) MUST have a name.
    /// The name MUST be unique across both initializer and sparse_initializer,
    /// but the name MAY also appear in the input list.
    #[prost(message, repeated, tag = "5")]
    pub initializer: ::prost::alloc::vec::Vec<TensorProto>,
    /// Initializers (see above) stored in sparse format.
    #[prost(message, repeated, tag = "15")]
    pub sparse_initializer: ::prost::alloc::vec::Vec<SparseTensorProto>,
    /// A human-readable documentation for this graph. Markdown is allowed.
    #[prost(string, tag = "10")]
    pub doc_string: ::prost::alloc::string::String,
    /// The inputs of the graph.
    #[prost(message, repeated, tag = "11")]
    pub input: ::prost::alloc::vec::Vec<ValueInfoProto>,
    /// The outputs of the graph.
    #[prost(message, repeated, tag = "12")]
    pub output: ::prost::alloc::vec::Vec<ValueInfoProto>,
    /// Information for the values in the graph. The ValueInfoProto.name's
    /// must be distinct. It is optional for a value to appear in value_info list.
    #[prost(message, repeated, tag = "13")]
    pub value_info: ::prost::alloc::vec::Vec<ValueInfoProto>,
    /// This field carries information to indicate the mapping among a tensor and its
    /// quantization parameter tensors. For example:
    /// For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
    /// which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
    #[prost(message, repeated, tag = "14")]
    pub quantization_annotation: ::prost::alloc::vec::Vec<TensorAnnotation>,
}
/// Tensors
///
/// A serialized tensor value.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TensorProto {
    /// The shape of the tensor.
    #[prost(int64, repeated, tag = "1")]
    pub dims: ::prost::alloc::vec::Vec<i64>,
    /// The data type of the tensor.
    /// This field MUST have a valid TensorProto.DataType value
    #[prost(int32, tag = "2")]
    pub data_type: i32,
    #[prost(message, optional, tag = "3")]
    pub segment: ::core::option::Option<tensor_proto::Segment>,
    /// For float and complex64 values
    /// Complex64 tensors are encoded as a single array of floats,
    /// with the real components appearing in odd numbered positions,
    /// and the corresponding imaginary component appearing in the
    /// subsequent even numbered position. (e.g., \[1.0 + 2.0i, 3.0 + 4.0i\]
    /// is encoded as \[1.0, 2.0 ,3.0 ,4.0\].)
    /// When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
    #[prost(float, repeated, tag = "4")]
    pub float_data: ::prost::alloc::vec::Vec<f32>,
    /// For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
    /// float16 and float8 values must be bit-wise converted to an uint16_t prior
    /// to writing to the buffer.
    /// When this field is present, the data_type field MUST be
    /// INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
    #[prost(int32, repeated, tag = "5")]
    pub int32_data: ::prost::alloc::vec::Vec<i32>,
    /// For strings.
    /// Each element of string_data is a UTF-8 encoded Unicode
    /// string. No trailing null, no leading BOM. The protobuf "string"
    /// scalar type is not used to match ML community conventions.
    /// When this field is present, the data_type field MUST be STRING
    #[prost(bytes = "vec", repeated, tag = "6")]
    pub string_data: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
    /// For int64.
    /// When this field is present, the data_type field MUST be INT64
    #[prost(int64, repeated, tag = "7")]
    pub int64_data: ::prost::alloc::vec::Vec<i64>,
    /// Optionally, a name for the tensor.
    ///
    /// namespace Value
    #[prost(string, tag = "8")]
    pub name: ::prost::alloc::string::String,
    /// A human-readable documentation for this tensor. Markdown is allowed.
    #[prost(string, tag = "12")]
    pub doc_string: ::prost::alloc::string::String,
    /// Serializations can either use one of the fields above, or use this
    /// raw bytes field. The only exception is the string case, where one is
    /// required to store the content in the repeated bytes string_data field.
    ///
    /// When this raw_data field is used to store tensor value, elements MUST
    /// be stored as fixed-width, little-endian order.
    /// Floating-point data types MUST be stored in IEEE 754 format.
    /// Complex64 elements must be written as two consecutive FLOAT values, real component first.
    /// Complex128 elements must be written as two consecutive DOUBLE values, real component first.
    /// Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
    ///
    /// Note: the advantage of specific field rather than the raw_data field is
    /// that in some cases (e.g. int data), protobuf does a better packing via
    /// variable length storage, and may lead to smaller binary footprint.
    /// When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
    #[prost(bytes = "vec", tag = "9")]
    pub raw_data: ::prost::alloc::vec::Vec<u8>,
    /// Data can be stored inside the protobuf file using type-specific fields or raw_data.
    /// Alternatively, raw bytes data can be stored in an external file, using the external_data field.
    /// external_data stores key-value pairs describing data location. Recognized keys are:
    /// - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
    ///                            protobuf model was stored
    /// - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
    ///                          Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
    /// - "length" (optional) - number of bytes containing data. Integer stored as string.
    /// - "checksum" (optional) - SHA1 digest of file specified under the 'location' key.
    #[prost(message, repeated, tag = "13")]
    pub external_data: ::prost::alloc::vec::Vec<StringStringEntryProto>,
    /// If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
    #[prost(enumeration = "tensor_proto::DataLocation", tag = "14")]
    pub data_location: i32,
    /// For double
    /// Complex128 tensors are encoded as a single array of doubles,
    /// with the real components appearing in odd numbered positions,
    /// and the corresponding imaginary component appearing in the
    /// subsequent even numbered position. (e.g., \[1.0 + 2.0i, 3.0 + 4.0i\]
    /// is encoded as \[1.0, 2.0 ,3.0 ,4.0\].)
    /// When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
    #[prost(double, repeated, tag = "10")]
    pub double_data: ::prost::alloc::vec::Vec<f64>,
    /// For uint64 and uint32 values
    /// When this field is present, the data_type field MUST be
    /// UINT32 or UINT64
    #[prost(uint64, repeated, tag = "11")]
    pub uint64_data: ::prost::alloc::vec::Vec<u64>,
}
/// Nested message and enum types in `TensorProto`.
///
/// NOTE(review): this module is prost-generated from the ONNX protobuf
/// schema; any hand edits must be kept in sync with the generator output.
pub mod tensor_proto {
    /// For very large tensors, we may want to store them in chunks, in which
    /// case the following fields will specify the segment that is stored in
    /// the current TensorProto.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Segment {
        /// Start of this chunk within the full tensor data.
        #[prost(int64, tag = "1")]
        pub begin: i64,
        /// End of this chunk within the full tensor data.
        #[prost(int64, tag = "2")]
        pub end: i64,
    }
    /// Element types for tensor data. The discriminants are the wire values of
    /// the ProtoBuf `TensorProto.DataType` enum; fields such as
    /// `TensorProto::data_type` carry them as a raw `i32`.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum DataType {
        Undefined = 0,
        /// Basic types.
        ///
        /// float
        Float = 1,
        /// uint8_t
        Uint8 = 2,
        /// int8_t
        Int8 = 3,
        /// uint16_t
        Uint16 = 4,
        /// int16_t
        Int16 = 5,
        /// int32_t
        Int32 = 6,
        /// int64_t
        Int64 = 7,
        /// string
        String = 8,
        /// bool
        Bool = 9,
        /// IEEE754 half-precision floating-point format (16 bits wide).
        /// This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
        Float16 = 10,
        Double = 11,
        Uint32 = 12,
        Uint64 = 13,
        /// complex with float32 real and imaginary components
        Complex64 = 14,
        /// complex with float64 real and imaginary components
        Complex128 = 15,
        /// Non-IEEE floating-point format based on IEEE754 single-precision
        /// floating-point number truncated to 16 bits.
        /// This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
        Bfloat16 = 16,
        /// Non-IEEE floating-point formats based on the papers
        /// "FP8 Formats for Deep Learning", <https://arxiv.org/abs/2209.05433>, and
        /// "8-bit Numerical Formats For Deep Neural Networks", <https://arxiv.org/pdf/2206.02915.pdf>.
        /// Operators supporting FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
        /// The computation usually happens inside a block quantize / dequantize
        /// fused by the runtime.
        ///
        /// float 8, mostly used for coefficients, supports nan, not inf
        Float8e4m3fn = 17,
        /// float 8, mostly used for coefficients, supports nan, not inf, no negative zero
        Float8e4m3fnuz = 18,
        /// follows IEEE 754, supports nan, inf, mostly used for gradients
        Float8e5m2 = 19,
        /// follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero
        Float8e5m2fnuz = 20,
    }
    impl DataType {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                DataType::Undefined => "UNDEFINED",
                DataType::Float => "FLOAT",
                DataType::Uint8 => "UINT8",
                DataType::Int8 => "INT8",
                DataType::Uint16 => "UINT16",
                DataType::Int16 => "INT16",
                DataType::Int32 => "INT32",
                DataType::Int64 => "INT64",
                DataType::String => "STRING",
                DataType::Bool => "BOOL",
                DataType::Float16 => "FLOAT16",
                DataType::Double => "DOUBLE",
                DataType::Uint32 => "UINT32",
                DataType::Uint64 => "UINT64",
                DataType::Complex64 => "COMPLEX64",
                DataType::Complex128 => "COMPLEX128",
                DataType::Bfloat16 => "BFLOAT16",
                DataType::Float8e4m3fn => "FLOAT8E4M3FN",
                DataType::Float8e4m3fnuz => "FLOAT8E4M3FNUZ",
                DataType::Float8e5m2 => "FLOAT8E5M2",
                DataType::Float8e5m2fnuz => "FLOAT8E5M2FNUZ",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        ///
        /// Returns `None` for any string that is not an exact (case-sensitive)
        /// match of one of the ProtoBuf enum value names above.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "UNDEFINED" => Some(Self::Undefined),
                "FLOAT" => Some(Self::Float),
                "UINT8" => Some(Self::Uint8),
                "INT8" => Some(Self::Int8),
                "UINT16" => Some(Self::Uint16),
                "INT16" => Some(Self::Int16),
                "INT32" => Some(Self::Int32),
                "INT64" => Some(Self::Int64),
                "STRING" => Some(Self::String),
                "BOOL" => Some(Self::Bool),
                "FLOAT16" => Some(Self::Float16),
                "DOUBLE" => Some(Self::Double),
                "UINT32" => Some(Self::Uint32),
                "UINT64" => Some(Self::Uint64),
                "COMPLEX64" => Some(Self::Complex64),
                "COMPLEX128" => Some(Self::Complex128),
                "BFLOAT16" => Some(Self::Bfloat16),
                "FLOAT8E4M3FN" => Some(Self::Float8e4m3fn),
                "FLOAT8E4M3FNUZ" => Some(Self::Float8e4m3fnuz),
                "FLOAT8E5M2" => Some(Self::Float8e5m2),
                "FLOAT8E5M2FNUZ" => Some(Self::Float8e5m2fnuz),
                _ => None,
            }
        }
    }
    /// Location of the data for this tensor. MUST be one of:
    /// - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
    /// - EXTERNAL - data stored in an external location as described by external_data field.
    #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
    #[repr(i32)]
    pub enum DataLocation {
        Default = 0,
        External = 1,
    }
    impl DataLocation {
        /// String value of the enum field names used in the ProtoBuf definition.
        ///
        /// The values are not transformed in any way and thus are considered stable
        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
        pub fn as_str_name(&self) -> &'static str {
            match self {
                DataLocation::Default => "DEFAULT",
                DataLocation::External => "EXTERNAL",
            }
        }
        /// Creates an enum from field names used in the ProtoBuf definition.
        ///
        /// Returns `None` for any string that is not an exact (case-sensitive)
        /// match of a ProtoBuf enum value name.
        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
            match value {
                "DEFAULT" => Some(Self::Default),
                "EXTERNAL" => Some(Self::External),
                _ => None,
            }
        }
    }
}
/// A serialized sparse-tensor value
///
/// Encoded in coordinate style: `values` holds the NNZ non-default entries
/// and `indices` their positions within the dense shape given by `dims`.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SparseTensorProto {
    /// The sequence of non-default values are encoded as a tensor of shape \[NNZ\].
    /// The default-value is zero for numeric tensors, and empty-string for string tensors.
    /// values must have a non-empty name present which serves as a name for SparseTensorProto
    /// when used in sparse_initializer list.
    #[prost(message, optional, tag = "1")]
    pub values: ::core::option::Option<TensorProto>,
    /// The indices of the non-default values, which may be stored in one of two formats.
    /// (a) Indices can be a tensor of shape \[NNZ, rank\] with the \[i,j\]-th value
    /// corresponding to the j-th index of the i-th value (in the values tensor).
    /// (b) Indices can be a tensor of shape \[NNZ\], in which case the i-th value
    /// must be the linearized-index of the i-th value (in the values tensor).
    /// The linearized-index can be converted into an index tuple (k_1,...,k_rank)
    /// using the shape provided below.
    /// The indices must appear in ascending order without duplication.
    /// In the first format, the ordering is lexicographic-ordering:
    /// e.g., index-value \[1,4\] must appear before \[2,1\]
    #[prost(message, optional, tag = "2")]
    pub indices: ::core::option::Option<TensorProto>,
    /// The shape of the underlying dense-tensor: \[dim_1, dim_2, ... dim_rank\]
    #[prost(int64, repeated, tag = "3")]
    pub dims: ::prost::alloc::vec::Vec<i64>,
}
/// Defines a tensor shape. A dimension can be either an integer value
/// or a symbolic variable. A symbolic variable represents an unknown
/// dimension.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TensorShapeProto {
    /// One entry per dimension, in order; each is either a concrete size or a
    /// symbolic parameter (see `tensor_shape_proto::Dimension`).
    #[prost(message, repeated, tag = "1")]
    pub dim: ::prost::alloc::vec::Vec<tensor_shape_proto::Dimension>,
}
/// Nested message and enum types in `TensorShapeProto`.
pub mod tensor_shape_proto {
    /// A single dimension of a tensor shape: either a fixed size
    /// (`dimension::Value::DimValue`) or a named symbolic parameter
    /// (`dimension::Value::DimParam`).
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Dimension {
        /// Standard denotation can optionally be used to denote tensor
        /// dimensions with standard semantic descriptions to ensure
        /// that operations are applied to the correct axis of a tensor.
        /// Refer to <https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition>
        /// for pre-defined dimension denotations.
        #[prost(string, tag = "3")]
        pub denotation: ::prost::alloc::string::String,
        /// The size of this dimension; `None` means the dimension is unknown.
        #[prost(oneof = "dimension::Value", tags = "1, 2")]
        pub value: ::core::option::Option<dimension::Value>,
    }
    /// Nested message and enum types in `Dimension`.
    pub mod dimension {
        #[allow(clippy::derive_partial_eq_without_eq)]
        #[derive(Clone, PartialEq, ::prost::Oneof)]
        pub enum Value {
            /// A concrete dimension size.
            #[prost(int64, tag = "1")]
            DimValue(i64),
            /// A symbolic dimension name (e.g. a batch-size parameter).
            ///
            /// namespace Shape
            #[prost(string, tag = "2")]
            DimParam(::prost::alloc::string::String),
        }
    }
}
/// Types
///
/// The standard ONNX data types.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TypeProto {
    /// An optional denotation can be used to denote the whole
    /// type with a standard semantic description as to what is
    /// stored inside. Refer to <https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition>
    /// for pre-defined type denotations.
    #[prost(string, tag = "6")]
    pub denotation: ::prost::alloc::string::String,
    /// The actual type: exactly one of tensor, sequence, map, optional, or
    /// sparse tensor (see `type_proto::Value`).
    #[prost(oneof = "type_proto::Value", tags = "1, 4, 5, 9, 8")]
    pub value: ::core::option::Option<type_proto::Value>,
}
/// Nested message and enum types in `TypeProto`.
pub mod type_proto {
    /// The type of a (dense) tensor: element type plus optional shape.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Tensor {
        /// This field MUST NOT have the value of UNDEFINED
        /// This field MUST have a valid TensorProto.DataType value
        /// This field MUST be present for this version of the IR.
        #[prost(int32, tag = "1")]
        pub elem_type: i32,
        #[prost(message, optional, tag = "2")]
        pub shape: ::core::option::Option<super::TensorShapeProto>,
    }
    /// repeated T
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Sequence {
        /// The type and optional shape of each element of the sequence.
        /// This field MUST be present for this version of the IR.
        #[prost(message, optional, boxed, tag = "1")]
        pub elem_type: ::core::option::Option<::prost::alloc::boxed::Box<super::TypeProto>>,
    }
    /// map<K,V>
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Map {
        /// This field MUST have a valid TensorProto.DataType value
        /// This field MUST be present for this version of the IR.
        /// This field MUST refer to an integral type (\[U\]INT{8|16|32|64}) or STRING
        #[prost(int32, tag = "1")]
        pub key_type: i32,
        /// This field MUST be present for this version of the IR.
        #[prost(message, optional, boxed, tag = "2")]
        pub value_type: ::core::option::Option<::prost::alloc::boxed::Box<super::TypeProto>>,
    }
    /// wrapper for Tensor, Sequence, or Map
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Optional {
        /// The type and optional shape of the element wrapped.
        /// This field MUST be present for this version of the IR.
        /// Possible values correspond to OptionalProto.DataType enum
        #[prost(message, optional, boxed, tag = "1")]
        pub elem_type: ::core::option::Option<::prost::alloc::boxed::Box<super::TypeProto>>,
    }
    /// The type of a sparse tensor: element type plus optional dense shape.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct SparseTensor {
        /// This field MUST NOT have the value of UNDEFINED
        /// This field MUST have a valid TensorProto.DataType value
        /// This field MUST be present for this version of the IR.
        #[prost(int32, tag = "1")]
        pub elem_type: i32,
        #[prost(message, optional, tag = "2")]
        pub shape: ::core::option::Option<super::TensorShapeProto>,
    }
    /// The single concrete type carried by a `TypeProto`; exactly one variant
    /// is set at a time (ProtoBuf `oneof`).
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Value {
        /// The type of a tensor.
        #[prost(message, tag = "1")]
        TensorType(Tensor),
        /// The type of a sequence.
        #[prost(message, tag = "4")]
        SequenceType(::prost::alloc::boxed::Box<Sequence>),
        /// The type of a map.
        #[prost(message, tag = "5")]
        MapType(::prost::alloc::boxed::Box<Map>),
        /// The type of an optional.
        #[prost(message, tag = "9")]
        OptionalType(::prost::alloc::boxed::Box<Optional>),
        /// Type of the sparse tensor
        #[prost(message, tag = "8")]
        SparseTensorType(SparseTensor),
    }
}
/// Operator Sets
///
/// OperatorSets are uniquely identified by a (domain, opset_version) pair.
/// Referenced from `opset_import` lists (e.g. in `FunctionProto`).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct OperatorSetIdProto {
    /// The domain of the operator set being identified.
    /// The empty string ("") or absence of this field implies the operator
    /// set that is defined as part of the ONNX specification.
    /// This field MUST be present in this version of the IR when referring to any other operator set.
    #[prost(string, tag = "1")]
    pub domain: ::prost::alloc::string::String,
    /// The version of the operator set being identified.
    /// This field MUST be present in this version of the IR.
    #[prost(int64, tag = "2")]
    pub version: i64,
}
/// A function: a named, reusable computation defined as a list of nodes.
/// Uniquely identified by its (`domain`, `name`) pair.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct FunctionProto {
    /// The name of the function, similar usage of op_type in OperatorProto.
    /// Combined with FunctionProto.domain, this forms the unique identity of
    /// the FunctionProto.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// The inputs and outputs of the function.
    #[prost(string, repeated, tag = "4")]
    pub input: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    #[prost(string, repeated, tag = "5")]
    pub output: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// The attribute parameters of the function.
    /// It is for function parameters without default values.
    #[prost(string, repeated, tag = "6")]
    pub attribute: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// The attribute protos of the function.
    /// It is for function attributes with default values.
    /// A function attribute shall be represented either as
    /// a string attribute or an AttributeProto, not both.
    #[prost(message, repeated, tag = "11")]
    pub attribute_proto: ::prost::alloc::vec::Vec<AttributeProto>,
    /// The nodes in the function.
    #[prost(message, repeated, tag = "7")]
    pub node: ::prost::alloc::vec::Vec<NodeProto>,
    /// A human-readable documentation for this function. Markdown is allowed.
    #[prost(string, tag = "8")]
    pub doc_string: ::prost::alloc::string::String,
    /// The operator sets this function's body may reference.
    #[prost(message, repeated, tag = "9")]
    pub opset_import: ::prost::alloc::vec::Vec<OperatorSetIdProto>,
    /// The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of
    /// the FunctionProto.
    #[prost(string, tag = "10")]
    pub domain: ::prost::alloc::string::String,
}
/// Versioning
///
/// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
///
/// To be compatible with both proto2 and proto3, we will use a version number
/// that is not defined by the default value but an explicit enum number.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Version {
    /// proto3 requires the first enum value to be zero.
    /// We add this just to appease the compiler.
    StartVersion = 0,
    /// The version field is always serialized and we will use it to store the
    /// version that the graph is generated from. This helps us set up version
    /// control.
    /// For the IR, we are using simple numbers starting with 0x00000001,
    /// which was the version we published on Oct 10, 2017.
    IrVersion20171010 = 1,
    /// IR_VERSION 2 published on Oct 30, 2017
    /// - Added type discriminator to AttributeProto to support proto3 users
    IrVersion20171030 = 2,
    /// IR VERSION 3 published on Nov 3, 2017
    /// - For operator versioning:
    ///     - Added new message OperatorSetIdProto
    ///     - Added opset_import in ModelProto
    /// - For vendor extensions, added domain in NodeProto
    IrVersion2017113 = 3,
    /// IR VERSION 4 published on Jan 22, 2019
    /// - Relax constraint that initializers should be a subset of graph inputs
    /// - Add type BFLOAT16
    IrVersion2019122 = 4,
    /// IR VERSION 5 published on March 18, 2019
    /// - Add message TensorAnnotation.
    /// - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
    IrVersion2019318 = 5,
    /// IR VERSION 6 published on Sep 19, 2019
    /// - Add support for sparse tensor constants stored in model.
    ///    - Add message SparseTensorProto
    ///    - Add sparse initializers
    IrVersion2019919 = 6,
    /// IR VERSION 7 published on May 8, 2020
    /// - Add support to allow function body graph to rely on multiple external operator sets.
    /// - Add a list to promote inference graph's initializers to global and
    ///    mutable variables. Global variables are visible in all graphs of the
    ///    stored models.
    /// - Add message TrainingInfoProto to store initialization
    ///    method and training algorithm. The execution of TrainingInfoProto
    ///    can modify the values of mutable variables.
    /// - Implicitly add inference graph into each TrainingInfoProto's algorithm.
    IrVersion202058 = 7,
    /// IR VERSION 8 published on July 30, 2021
    /// Introduce TypeProto.SparseTensor
    /// Introduce TypeProto.Optional
    /// Added a list of FunctionProtos local to the model
    /// Deprecated since_version and operator status from FunctionProto
    IrVersion2021730 = 8,
    /// IR VERSION 9 published on TBD
    /// Added AttributeProto to FunctionProto so that default attribute values can be set.
    /// Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ.
    IrVersion = 9,
}
impl Version {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Version::StartVersion => "_START_VERSION",
            Version::IrVersion20171010 => "IR_VERSION_2017_10_10",
            Version::IrVersion20171030 => "IR_VERSION_2017_10_30",
            Version::IrVersion2017113 => "IR_VERSION_2017_11_3",
            Version::IrVersion2019122 => "IR_VERSION_2019_1_22",
            Version::IrVersion2019318 => "IR_VERSION_2019_3_18",
            Version::IrVersion2019919 => "IR_VERSION_2019_9_19",
            Version::IrVersion202058 => "IR_VERSION_2020_5_8",
            Version::IrVersion2021730 => "IR_VERSION_2021_7_30",
            Version::IrVersion => "IR_VERSION",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    ///
    /// Returns `None` for any string that is not an exact (case-sensitive)
    /// match of a ProtoBuf enum value name.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "_START_VERSION" => Some(Self::StartVersion),
            "IR_VERSION_2017_10_10" => Some(Self::IrVersion20171010),
            "IR_VERSION_2017_10_30" => Some(Self::IrVersion20171030),
            "IR_VERSION_2017_11_3" => Some(Self::IrVersion2017113),
            "IR_VERSION_2019_1_22" => Some(Self::IrVersion2019122),
            "IR_VERSION_2019_3_18" => Some(Self::IrVersion2019318),
            "IR_VERSION_2019_9_19" => Some(Self::IrVersion2019919),
            "IR_VERSION_2020_5_8" => Some(Self::IrVersion202058),
            "IR_VERSION_2021_7_30" => Some(Self::IrVersion2021730),
            "IR_VERSION" => Some(Self::IrVersion),
            _ => None,
        }
    }
}
/// Operator/function status.
///
/// Discriminants match the wire values of the ProtoBuf `OperatorStatus` enum.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum OperatorStatus {
    Experimental = 0,
    Stable = 1,
}
impl OperatorStatus {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            OperatorStatus::Experimental => "EXPERIMENTAL",
            OperatorStatus::Stable => "STABLE",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    ///
    /// Returns `None` for any string that is not an exact (case-sensitive)
    /// match of a ProtoBuf enum value name.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "EXPERIMENTAL" => Some(Self::Experimental),
            "STABLE" => Some(Self::Stable),
            _ => None,
        }
    }
}