opentelemetry_proto/proto/tonic/opentelemetry.proto.metrics.v1.rs

// This file is @generated by prost-build.
/// MetricsData represents the metrics data that can be stored in a persistent
/// storage, OR can be embedded by other protocols that transfer OTLP metrics
/// data but do not implement the OTLP protocol.
///
/// MetricsData
/// └─── ResourceMetrics
///    ├── Resource
///    ├── SchemaURL
///    └── ScopeMetrics
///       ├── Scope
///       ├── SchemaURL
///       └── Metric
///          ├── Name
///          ├── Description
///          ├── Unit
///          └── data
///             ├── Gauge
///             ├── Sum
///             ├── Histogram
///             ├── ExponentialHistogram
///             └── Summary
///
/// The main difference between this message and collector protocol is that
/// in this message there will not be any "control" or "metadata" specific to
/// OTLP protocol.
///
/// When new fields are added into this message, the OTLP request MUST be updated
/// as well.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct MetricsData {
    /// An array of ResourceMetrics.
    /// For data coming from a single resource this array will typically contain
    /// one element. Intermediary nodes that receive data from multiple origins
    /// typically batch the data before forwarding further and in that case this
    /// array will contain multiple elements.
    #[prost(message, repeated, tag = "1")]
    pub resource_metrics: ::prost::alloc::vec::Vec<ResourceMetrics>,
}
/// A collection of ScopeMetrics from a Resource.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ResourceMetrics {
    /// The resource for the metrics in this message.
    /// If this field is not set then no resource info is known.
    #[prost(message, optional, tag = "1")]
    pub resource: ::core::option::Option<super::super::resource::v1::Resource>,
    /// A list of metrics that originate from a resource.
    #[prost(message, repeated, tag = "2")]
    pub scope_metrics: ::prost::alloc::vec::Vec<ScopeMetrics>,
    /// The Schema URL, if known. This is the identifier of the Schema that the resource data
    /// is recorded in. Notably, the last part of the URL path is the version number of the
    /// schema: http\[s\]://server\[:port\]/path/<version>. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to the data in the "resource" field. It does not apply
    /// to the data in the "scope_metrics" field which have their own schema_url field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// A collection of Metrics produced by a Scope.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ScopeMetrics {
    /// The instrumentation scope information for the metrics in this message.
    /// Semantically, when InstrumentationScope isn't set, it is equivalent to
    /// an empty instrumentation scope name (unknown).
    #[prost(message, optional, tag = "1")]
    pub scope: ::core::option::Option<super::super::common::v1::InstrumentationScope>,
    /// A list of metrics that originate from an instrumentation library.
    #[prost(message, repeated, tag = "2")]
    pub metrics: ::prost::alloc::vec::Vec<Metric>,
    /// The Schema URL, if known. This is the identifier of the Schema that the metric data
    /// is recorded in. Notably, the last part of the URL path is the version number of the
    /// schema: http\[s\]://server\[:port\]/path/<version>. To learn more about Schema URL see
    /// <https://opentelemetry.io/docs/specs/otel/schemas/#schema-url>
    /// This schema_url applies to all metrics in the "metrics" field.
    #[prost(string, tag = "3")]
    pub schema_url: ::prost::alloc::string::String,
}
/// Defines a Metric which has one or more timeseries.  The following is a
/// brief summary of the Metric data model.  For more details, see:
///
///    <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md>
///
/// The data model and relation between entities is shown in the
/// diagram below. Here, "DataPoint" is the term used to refer to any
/// one of the specific data point value types, and "points" is the term used
/// to refer to any one of the lists of points contained in the Metric.
///
/// - Metric is composed of metadata and data.
/// - The metadata part contains a name, description, and unit.
/// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
/// - DataPoint contains timestamps, attributes, and one of the possible value type
///    fields.
///
///     Metric
///   +------------+
///   |name        |
///   |description |
///   |unit        |     +------------------------------------+
///   |data        |---> |Gauge, Sum, Histogram, Summary, ... |
///   +------------+     +------------------------------------+
///
///     Data \[One of Gauge, Sum, Histogram, Summary, ...\]
///   +-----------+
///   |...        |  // Metadata about the Data.
///   |points     |--+
///   +-----------+  |
///                  |      +---------------------------+
///                  |      |DataPoint 1                |
///                  v      |+------+------+   +------+ |
///               +-----+   ||label |label |...|label | |
///               |  1  |-->||value1|value2|...|valueN| |
///               +-----+   |+------+------+   +------+ |
///               |  .  |   |+-----+                    |
///               |  .  |   ||value|                    |
///               |  .  |   |+-----+                    |
///               |  .  |   +---------------------------+
///               |  .  |                   .
///               |  .  |                   .
///               |  .  |                   .
///               |  .  |   +---------------------------+
///               |  .  |   |DataPoint M                |
///               +-----+   |+------+------+   +------+ |
///               |  M  |-->||label |label |...|label | |
///               +-----+   ||value1|value2|...|valueN| |
///                         |+------+------+   +------+ |
///                         |+-----+                    |
///                         ||value|                    |
///                         |+-----+                    |
///                         +---------------------------+
///
/// Each distinct type of DataPoint represents the output of a specific
/// aggregation function, the result of applying the DataPoint's
/// associated function to one or more measurements.
///
/// All DataPoint types have three common fields:
/// - Attributes includes key-value pairs associated with the data point
/// - TimeUnixNano is required, set to the end time of the aggregation
/// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
///    having an AggregationTemporality field, as discussed below.
///
/// Both TimeUnixNano and StartTimeUnixNano values are expressed as
/// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
///
/// # TimeUnixNano
///
/// This field is required, having consistent interpretation across
/// DataPoint types.  TimeUnixNano is the moment corresponding to when
/// the data point's aggregate value was captured.
///
/// Data points with the 0 value for TimeUnixNano SHOULD be rejected
/// by consumers.
///
/// # StartTimeUnixNano
///
/// StartTimeUnixNano in general allows detecting when a sequence of
/// observations is unbroken.  This field indicates to consumers the
/// start time for points with cumulative and delta
/// AggregationTemporality, and it should be included whenever possible
/// to support correct rate calculation.  Although it may be omitted
/// when the start time is truly unknown, setting StartTimeUnixNano is
/// strongly encouraged.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Metric {
    /// name of the metric.
    #[prost(string, tag = "1")]
    pub name: ::prost::alloc::string::String,
    /// description of the metric, which can be used in documentation.
    #[prost(string, tag = "2")]
    pub description: ::prost::alloc::string::String,
    /// unit in which the metric value is reported. Follows the format
    /// described by <https://unitsofmeasure.org/ucum.html>.
    #[prost(string, tag = "3")]
    pub unit: ::prost::alloc::string::String,
    /// Additional metadata attributes that describe the metric. \[Optional\].
    /// Attributes are non-identifying.
    /// Consumers SHOULD NOT need to be aware of these attributes.
    /// These attributes MAY be used to encode information allowing
    /// for lossless roundtrip translation to / from another data model.
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "12")]
    pub metadata: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// Data determines the aggregation type (if any) of the metric, what is the
    /// reported value type for the data points, as well as the relationship to
    /// the time interval over which they are reported.
    #[prost(oneof = "metric::Data", tags = "5, 7, 9, 10, 11")]
    #[cfg_attr(feature = "with-serde", serde(flatten))]
    pub data: ::core::option::Option<metric::Data>,
}
/// Nested message and enum types in `Metric`.
pub mod metric {
    /// Data determines the aggregation type (if any) of the metric, what is the
    /// reported value type for the data points, as well as the relationship to
    /// the time interval over which they are reported.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Data {
        #[prost(message, tag = "5")]
        Gauge(super::Gauge),
        #[prost(message, tag = "7")]
        Sum(super::Sum),
        #[prost(message, tag = "9")]
        Histogram(super::Histogram),
        #[prost(message, tag = "10")]
        ExponentialHistogram(super::ExponentialHistogram),
        #[prost(message, tag = "11")]
        Summary(super::Summary),
    }
}
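// Illustrative sketch (not part of the prost-generated output): assembling a
// Metric that carries a Gauge with one double-valued NumberDataPoint, showing
// how the `metric::Data` and `number_data_point::Value` oneofs fit together.
// The name, unit, and field values below are arbitrary placeholders.
#[cfg(test)]
mod metric_construction_example {
    use super::*;

    #[test]
    fn build_gauge_metric() {
        // A single point: the value lives in the `value` oneof.
        let point = NumberDataPoint {
            time_unix_nano: 1_700_000_000_000_000_000,
            value: Some(number_data_point::Value::AsDouble(42.0)),
            ..Default::default()
        };
        // The metric's data is the `Gauge` variant of the `metric::Data` oneof.
        let metric = Metric {
            name: "example.temperature".to_string(),
            unit: "Cel".to_string(),
            data: Some(metric::Data::Gauge(Gauge {
                data_points: vec![point],
            })),
            ..Default::default()
        };
        assert!(matches!(metric.data, Some(metric::Data::Gauge(_))));
    }
}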
/// Gauge represents the type of a scalar metric that always exports the
/// "current value" for every data point. It should be used for an "unknown"
/// aggregation.
///
/// A Gauge does not support different aggregation temporalities. Given the
/// aggregation is unknown, points cannot be combined using the same
/// aggregation, regardless of aggregation temporalities. Therefore,
/// AggregationTemporality is not included. Consequently, this also means
/// "StartTimeUnixNano" is ignored for all data points.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Gauge {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
}
/// Sum represents the type of a scalar metric that is calculated as a sum of all
/// reported measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Sum {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<NumberDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
    /// If "true", it means that the sum is monotonic.
    #[prost(bool, tag = "3")]
    pub is_monotonic: bool,
}
/// Histogram represents the type of a metric that is calculated by aggregating
/// as a Histogram of all reported measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Histogram {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<HistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// ExponentialHistogram represents the type of a metric that is calculated by aggregating
/// as an ExponentialHistogram of all reported double measurements over a time interval.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogram {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<ExponentialHistogramDataPoint>,
    /// aggregation_temporality describes if the aggregator reports delta changes
    /// since last report time, or cumulative changes since a fixed start time.
    #[prost(enumeration = "AggregationTemporality", tag = "2")]
    pub aggregation_temporality: i32,
}
/// Summary metric data are used to convey quantile summaries,
/// a Prometheus (see: <https://prometheus.io/docs/concepts/metric_types/#summary>)
/// and OpenMetrics (see: <https://github.com/prometheus/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45>)
/// data type. These data points cannot always be merged in a meaningful way.
/// While they can be useful in some applications, histogram data points are
/// recommended for new applications.
/// Summary metrics do not have an aggregation temporality field. This is
/// because the count and sum fields of a SummaryDataPoint are assumed to be
/// cumulative values.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Summary {
    #[prost(message, repeated, tag = "1")]
    pub data_points: ::prost::alloc::vec::Vec<SummaryDataPoint>,
}
/// NumberDataPoint is a single data point in a timeseries that describes the
/// time-varying scalar value of a metric.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct NumberDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub time_unix_nano: u64,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "5")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point.  See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
    /// The value itself.  A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
    #[prost(oneof = "number_data_point::Value", tags = "4, 6")]
    #[cfg_attr(feature = "with-serde", serde(flatten))]
    pub value: ::core::option::Option<number_data_point::Value>,
}
/// Nested message and enum types in `NumberDataPoint`.
pub mod number_data_point {
    /// The value itself.  A point is considered invalid when one of the recognized
    /// value fields is not present inside this oneof.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(double, tag = "4")]
        AsDouble(f64),
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// HistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Histogram. A Histogram contains summary statistics
/// for a population of values, it may optionally contain the distribution of
/// those values across a set of buckets.
///
/// If the histogram contains the distribution of values, then both
/// "explicit_bounds" and "bucket_counts" fields must be defined.
/// If the histogram does not contain the distribution of values, then both
/// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
/// "sum" are known.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[cfg_attr(feature = "with-serde", serde(default))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "9")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_u64_to_string",
            deserialize_with = "crate::proto::serializers::deserialize_string_to_u64"
        )
    )]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative. This
    /// value must be equal to the sum of the "count" fields in buckets if a
    /// histogram is provided.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// bucket_counts is an optional field that contains the count values of the
    /// histogram for each bucket.
    ///
    /// The sum of the bucket_counts must equal the value in the count field.
    ///
    /// The number of elements in the bucket_counts array must be one greater than
    /// the number of elements in the explicit_bounds array. The exception to this rule
    /// is when the length of bucket_counts is 0, then the length of explicit_bounds
    /// must also be 0.
    #[prost(fixed64, repeated, tag = "6")]
    pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    /// explicit_bounds specifies buckets with explicitly defined bounds for values.
    ///
    /// The boundaries for bucket at index i are:
    ///
    /// (-infinity, explicit_bounds\[i\]\] for i == 0
    /// (explicit_bounds\[i-1\], explicit_bounds\[i\]\] for 0 < i < size(explicit_bounds)
    /// (explicit_bounds\[i-1\], +infinity) for i == size(explicit_bounds)
    ///
    /// The values in the explicit_bounds array must be strictly increasing.
    ///
    /// Histogram buckets are inclusive of their upper boundary, except the last
    /// bucket where the boundary is at infinity. This format is intentionally
    /// compatible with the OpenMetrics histogram definition.
    ///
    /// If bucket_counts length is 0 then explicit_bounds length must also be 0,
    /// otherwise the data point is invalid.
    #[prost(double, repeated, tag = "7")]
    pub explicit_bounds: ::prost::alloc::vec::Vec<f64>,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "8")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// Flags that apply to this specific data point.  See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "11")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub max: ::core::option::Option<f64>,
}
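// Illustrative sketch (not part of the prost-generated output): locating the
// bucket a measurement falls into under the explicit-bounds layout documented
// on `HistogramDataPoint::explicit_bounds`, and checking the length invariant
// between `bucket_counts` and `explicit_bounds`. The helper and sample values
// are hypothetical.
#[cfg(test)]
mod histogram_bucket_example {
    use super::*;

    /// Index of the bucket that `value` falls into, using the
    /// (bounds[i-1], bounds[i]] convention (upper boundary inclusive).
    fn bucket_index(bounds: &[f64], value: f64) -> usize {
        bounds.partition_point(|upper| *upper < value)
    }

    #[test]
    fn explicit_bounds_layout() {
        let point = HistogramDataPoint {
            count: 4,
            bucket_counts: vec![1, 2, 1],
            explicit_bounds: vec![0.0, 10.0],
            ..Default::default()
        };
        // bucket_counts must have exactly one more element than explicit_bounds.
        assert_eq!(point.bucket_counts.len(), point.explicit_bounds.len() + 1);
        assert_eq!(bucket_index(&point.explicit_bounds, -1.0), 0); // (-inf, 0.0]
        assert_eq!(bucket_index(&point.explicit_bounds, 10.0), 1); // (0.0, 10.0]
        assert_eq!(bucket_index(&point.explicit_bounds, 11.0), 2); // (10.0, +inf)
    }
}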
/// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
/// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
/// summary statistics for a population of values, it may optionally contain the
/// distribution of those values across a set of buckets.
///
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ExponentialHistogramDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "1")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be
    /// non-negative. This value must be equal to the sum of the "bucket_counts"
    /// values in the positive and negative Buckets plus the "zero_count" field.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram>
    #[prost(double, optional, tag = "5")]
    pub sum: ::core::option::Option<f64>,
    /// scale describes the resolution of the histogram.  Boundaries are
    /// located at powers of the base, where:
    ///
    ///    base = (2^(2^-scale))
    ///
    /// The histogram bucket identified by `index`, a signed integer,
    /// contains values that are greater than (base^index) and
    /// less than or equal to (base^(index+1)).
    ///
    /// The positive and negative ranges of the histogram are expressed
    /// separately.  Negative values are mapped by their absolute value
    /// into the negative range using the same scale as the positive range.
    ///
    /// scale is not restricted by the protocol, as the permissible
    /// values depend on the range of the data.
    #[prost(sint32, tag = "6")]
    pub scale: i32,
    /// zero_count is the count of values that are either exactly zero or
    /// within the region considered zero by the instrumentation at the
    /// tolerated degree of precision.  This bucket stores values that
    /// cannot be expressed using the standard exponential formula as
    /// well as values that have been rounded to zero.
    ///
    /// Implementations MAY consider the zero bucket to have probability
    /// mass equal to (zero_count / count).
    #[prost(fixed64, tag = "7")]
    pub zero_count: u64,
    /// positive carries the positive range of exponential bucket counts.
    #[prost(message, optional, tag = "8")]
    pub positive: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// negative carries the negative range of exponential bucket counts.
    #[prost(message, optional, tag = "9")]
    pub negative: ::core::option::Option<exponential_histogram_data_point::Buckets>,
    /// Flags that apply to this specific data point.  See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "10")]
    pub flags: u32,
    /// (Optional) List of exemplars collected from
    /// measurements that were used to form the data point.
    #[prost(message, repeated, tag = "11")]
    pub exemplars: ::prost::alloc::vec::Vec<Exemplar>,
    /// min is the minimum value over (start_time, end_time].
    #[prost(double, optional, tag = "12")]
    pub min: ::core::option::Option<f64>,
    /// max is the maximum value over (start_time, end_time].
    #[prost(double, optional, tag = "13")]
    pub max: ::core::option::Option<f64>,
    /// ZeroThreshold may be optionally set to convey the width of the zero
    /// region. Where the zero region is defined as the closed interval
    /// \[-ZeroThreshold, ZeroThreshold\].
    /// When ZeroThreshold is 0, the zero count bucket stores values that cannot be
    /// expressed using the standard exponential formula as well as values that
    /// have been rounded to zero.
    #[prost(double, tag = "14")]
    pub zero_threshold: f64,
}
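// Illustrative sketch (not part of the prost-generated output): deriving bucket
// boundaries from `scale` according to the base = 2^(2^-scale) formula
// documented on `ExponentialHistogramDataPoint::scale`. The helper function is
// hypothetical and only meant to make the formula concrete.
#[cfg(test)]
mod exponential_bucket_example {
    /// Lower and upper bounds (lower, upper] of the bucket at `index`
    /// for the given `scale`, following base = 2^(2^-scale).
    fn bucket_bounds(scale: i32, index: i32) -> (f64, f64) {
        let base = 2f64.powf(2f64.powi(-scale));
        (base.powi(index), base.powi(index + 1))
    }

    #[test]
    fn scale_zero_buckets_are_powers_of_two() {
        // At scale 0 the base is 2, so bucket 3 covers (8, 16].
        let (lower, upper) = bucket_bounds(0, 3);
        assert_eq!(lower, 8.0);
        assert_eq!(upper, 16.0);
    }
}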
/// Nested message and enum types in `ExponentialHistogramDataPoint`.
pub mod exponential_histogram_data_point {
    /// Buckets are a set of bucket counts, encoded in a contiguous array
    /// of counts.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, PartialEq, ::prost::Message)]
    pub struct Buckets {
        /// Offset is the bucket index of the first entry in the bucket_counts array.
        ///
        /// Note: This uses a varint encoding as a simple form of compression.
        #[prost(sint32, tag = "1")]
        pub offset: i32,
        /// bucket_counts is an array of count values, where bucket_counts\[i\] carries
        /// the count of the bucket at index (offset+i). bucket_counts\[i\] is the count
        /// of values greater than base^(offset+i) and less than or equal to
        /// base^(offset+i+1).
        ///
        /// Note: By contrast, the explicit HistogramDataPoint uses
        /// fixed64.  This field is expected to have many buckets,
        /// especially zeros, so uint64 has been selected to ensure
        /// varint encoding.
        #[prost(uint64, repeated, tag = "2")]
        pub bucket_counts: ::prost::alloc::vec::Vec<u64>,
    }
}
/// SummaryDataPoint is a single data point in a timeseries that describes the
/// time-varying values of a Summary metric. The count and sum fields represent
/// cumulative values.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SummaryDataPoint {
    /// The set of key/value pairs that uniquely identify the timeseries from
    /// where this point belongs. The list may be empty (may contain 0 elements).
    /// Attribute keys MUST be unique (it is not allowed to have more than one
    /// attribute with the same key).
    #[prost(message, repeated, tag = "7")]
    pub attributes: ::prost::alloc::vec::Vec<super::super::common::v1::KeyValue>,
    /// StartTimeUnixNano is optional but strongly encouraged, see the
    /// detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub start_time_unix_nano: u64,
    /// TimeUnixNano is required, see the detailed comments above Metric.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "3")]
    pub time_unix_nano: u64,
    /// count is the number of values in the population. Must be non-negative.
    #[prost(fixed64, tag = "4")]
    pub count: u64,
    /// sum of the values in the population. If count is zero then this field
    /// must be zero.
    ///
    /// Note: Sum should only be filled out when measuring non-negative discrete
    /// events, and is assumed to be monotonic over the values of these events.
    /// Negative events *can* be recorded, but sum should not be filled out when
    /// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
    /// see: <https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary>
    #[prost(double, tag = "5")]
    pub sum: f64,
    /// (Optional) list of values at different quantiles of the distribution calculated
    /// from the current snapshot. The quantiles must be strictly increasing.
    #[prost(message, repeated, tag = "6")]
    pub quantile_values: ::prost::alloc::vec::Vec<summary_data_point::ValueAtQuantile>,
    /// Flags that apply to this specific data point.  See DataPointFlags
    /// for the available flags and their meaning.
    #[prost(uint32, tag = "8")]
    pub flags: u32,
}
/// Nested message and enum types in `SummaryDataPoint`.
pub mod summary_data_point {
    /// Represents the value at a given quantile of a distribution.
    ///
    /// To record Min and Max values the following conventions are used:
    /// - The 1.0 quantile is equivalent to the maximum value observed.
    /// - The 0.0 quantile is equivalent to the minimum value observed.
    ///
    /// See the following issue for more context:
    /// <https://github.com/open-telemetry/opentelemetry-proto/issues/125>
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, Copy, PartialEq, ::prost::Message)]
    pub struct ValueAtQuantile {
        /// The quantile of a distribution. Must be in the interval
        /// \[0.0, 1.0\].
        #[prost(double, tag = "1")]
        pub quantile: f64,
        /// The value at the given quantile of a distribution.
        ///
        /// Quantile values must NOT be negative.
        #[prost(double, tag = "2")]
        pub value: f64,
    }
}
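// Illustrative sketch (not part of the prost-generated output): reading the
// min/max convention described on ValueAtQuantile, where the 0.0 and 1.0
// quantiles carry the minimum and maximum observed values. Sample values are
// placeholders.
#[cfg(test)]
mod summary_quantile_example {
    use super::*;

    #[test]
    fn min_and_max_from_quantiles() {
        let point = SummaryDataPoint {
            count: 3,
            sum: 9.0,
            quantile_values: vec![
                summary_data_point::ValueAtQuantile { quantile: 0.0, value: 1.0 },
                summary_data_point::ValueAtQuantile { quantile: 0.5, value: 3.0 },
                summary_data_point::ValueAtQuantile { quantile: 1.0, value: 5.0 },
            ],
            ..Default::default()
        };
        // Min is the value at quantile 0.0, max is the value at quantile 1.0.
        let min = point.quantile_values.iter().find(|q| q.quantile == 0.0).map(|q| q.value);
        let max = point.quantile_values.iter().find(|q| q.quantile == 1.0).map(|q| q.value);
        assert_eq!(min, Some(1.0));
        assert_eq!(max, Some(5.0));
    }
}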
/// A representation of an exemplar, which is a sample input measurement.
/// Exemplars also hold information about the environment when the measurement
/// was recorded, for example the span and trace ID of the active span when the
/// exemplar was recorded.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Exemplar {
    /// The set of key/value pairs that were filtered out by the aggregator, but
    /// recorded alongside the original measurement. Only key/value pairs that were
    /// filtered out by the aggregator should be included.
    #[prost(message, repeated, tag = "7")]
    pub filtered_attributes: ::prost::alloc::vec::Vec<
        super::super::common::v1::KeyValue,
    >,
    /// time_unix_nano is the exact time when this exemplar was recorded.
    ///
    /// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
    /// 1970.
    #[prost(fixed64, tag = "2")]
    pub time_unix_nano: u64,
    /// (Optional) Span ID of the exemplar trace.
    /// span_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "4")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_to_hex_string",
            deserialize_with = "crate::proto::serializers::deserialize_from_hex_string"
        )
    )]
    pub span_id: ::prost::alloc::vec::Vec<u8>,
    /// (Optional) Trace ID of the exemplar trace.
    /// trace_id may be missing if the measurement is not recorded inside a trace
    /// or if the trace is not sampled.
    #[prost(bytes = "vec", tag = "5")]
    #[cfg_attr(
        feature = "with-serde",
        serde(
            serialize_with = "crate::proto::serializers::serialize_to_hex_string",
            deserialize_with = "crate::proto::serializers::deserialize_from_hex_string"
        )
    )]
    pub trace_id: ::prost::alloc::vec::Vec<u8>,
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    #[prost(oneof = "exemplar::Value", tags = "3, 6")]
    pub value: ::core::option::Option<exemplar::Value>,
}
/// Nested message and enum types in `Exemplar`.
pub mod exemplar {
    /// The value of the measurement that was recorded. An exemplar is
    /// considered invalid when one of the recognized value fields is not present
    /// inside this oneof.
    #[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
    #[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
    #[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
    #[derive(Clone, Copy, PartialEq, ::prost::Oneof)]
    pub enum Value {
        #[prost(double, tag = "3")]
        AsDouble(f64),
        #[prost(sfixed64, tag = "6")]
        AsInt(i64),
    }
}
/// AggregationTemporality defines how a metric aggregator reports aggregated
/// values. It describes how those values relate to the time interval over
/// which they are aggregated.
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum AggregationTemporality {
    /// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
    Unspecified = 0,
    /// DELTA is an AggregationTemporality for a metric aggregator which reports
    /// changes since last report time. Successive metrics contain aggregation of
    /// values from continuous and non-overlapping intervals.
    ///
    /// The values for a DELTA metric are based only on the time interval
    /// associated with one measurement cycle. There is no dependency on
    /// previous measurements, as is the case for CUMULATIVE metrics.
    ///
    /// For example, consider a system measuring the number of requests that
    /// it receives and reports the sum of these requests every second as a
    /// DELTA metric:
    ///
    ///    1. The system starts receiving at time=t_0.
    ///    2. A request is received, the system measures 1 request.
    ///    3. A request is received, the system measures 1 request.
    ///    4. A request is received, the system measures 1 request.
    ///    5. The 1 second collection cycle ends. A metric is exported for the
    ///       number of requests received over the interval of time t_0 to
    ///       t_0+1 with a value of 3.
    ///    6. A request is received, the system measures 1 request.
    ///    7. A request is received, the system measures 1 request.
    ///    8. The 1 second collection cycle ends. A metric is exported for the
    ///       number of requests received over the interval of time t_0+1 to
    ///       t_0+2 with a value of 2.
    Delta = 1,
    /// CUMULATIVE is an AggregationTemporality for a metric aggregator which
    /// reports changes since a fixed start time. This means that current values
    /// of a CUMULATIVE metric depend on all previous measurements since the
    /// start time. Because of this, the sender is required to retain this state
    /// in some form. If this state is lost or invalidated, the CUMULATIVE metric
    /// values MUST be reset and a new fixed start time following the last
    /// reported measurement time sent MUST be used.
    ///
    /// For example, consider a system measuring the number of requests that
    /// it receives and reports the sum of these requests every second as a
    /// CUMULATIVE metric:
    ///
    ///    1. The system starts receiving at time=t_0.
    ///    2. A request is received, the system measures 1 request.
    ///    3. A request is received, the system measures 1 request.
    ///    4. A request is received, the system measures 1 request.
    ///    5. The 1 second collection cycle ends. A metric is exported for the
    ///       number of requests received over the interval of time t_0 to
    ///       t_0+1 with a value of 3.
    ///    6. A request is received, the system measures 1 request.
    ///    7. A request is received, the system measures 1 request.
    ///    8. The 1 second collection cycle ends. A metric is exported for the
    ///       number of requests received over the interval of time t_0 to
    ///       t_0+2 with a value of 5.
    ///    9. The system experiences a fault and loses state.
    ///    10. The system recovers and resumes receiving at time=t_1.
    ///    11. A request is received, the system measures 1 request.
    ///    12. The 1 second collection cycle ends. A metric is exported for the
    ///       number of requests received over the interval of time t_1 to
    ///       t_0+1 with a value of 1.
    ///
    /// Note: Even though using CUMULATIVE to report changes since the last
    /// report time is valid, it is not recommended. This may cause problems for
    /// systems that do not use start_time to determine when the aggregation
    /// value was reset (e.g. Prometheus).
    Cumulative = 2,
}
impl AggregationTemporality {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::Unspecified => "AGGREGATION_TEMPORALITY_UNSPECIFIED",
            Self::Delta => "AGGREGATION_TEMPORALITY_DELTA",
            Self::Cumulative => "AGGREGATION_TEMPORALITY_CUMULATIVE",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "AGGREGATION_TEMPORALITY_UNSPECIFIED" => Some(Self::Unspecified),
            "AGGREGATION_TEMPORALITY_DELTA" => Some(Self::Delta),
            "AGGREGATION_TEMPORALITY_CUMULATIVE" => Some(Self::Cumulative),
            _ => None,
        }
    }
}
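// Illustrative sketch (not part of the prost-generated output): the
// `aggregation_temporality` fields above are stored as raw i32 values. A plain
// match against the enum discriminants recovers the temporality without relying
// on any particular prost conversion helper; the helper function is hypothetical.
#[cfg(test)]
mod aggregation_temporality_example {
    use super::*;

    fn temporality_of(sum: &Sum) -> Option<AggregationTemporality> {
        match sum.aggregation_temporality {
            x if x == AggregationTemporality::Delta as i32 => {
                Some(AggregationTemporality::Delta)
            }
            x if x == AggregationTemporality::Cumulative as i32 => {
                Some(AggregationTemporality::Cumulative)
            }
            x if x == AggregationTemporality::Unspecified as i32 => {
                Some(AggregationTemporality::Unspecified)
            }
            _ => None,
        }
    }

    #[test]
    fn decode_raw_temporality() {
        let sum = Sum {
            aggregation_temporality: AggregationTemporality::Delta as i32,
            is_monotonic: true,
            ..Default::default()
        };
        assert_eq!(temporality_of(&sum), Some(AggregationTemporality::Delta));
    }
}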
/// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
/// bit-field representing 32 distinct boolean flags.  Each flag defined in this
/// enum is a bit-mask.  To test the presence of a single flag in the flags of
/// a data point, for example, use an expression like:
///
///    (point.flags & DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK) == DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK
///
#[cfg_attr(feature = "with-schemars", derive(schemars::JsonSchema))]
#[cfg_attr(feature = "with-serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "with-serde", serde(rename_all = "camelCase"))]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum DataPointFlags {
    /// The zero value for the enum. Should not be used for comparisons.
    /// Instead use bitwise "and" with the appropriate mask as shown above.
    DoNotUse = 0,
    /// This DataPoint is valid but has no recorded value.  This value
    /// SHOULD be used to reflect explicitly missing data in a series, for
    /// example as an equivalent to the Prometheus "staleness marker".
    NoRecordedValueMask = 1,
}
impl DataPointFlags {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            Self::DoNotUse => "DATA_POINT_FLAGS_DO_NOT_USE",
            Self::NoRecordedValueMask => "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "DATA_POINT_FLAGS_DO_NOT_USE" => Some(Self::DoNotUse),
            "DATA_POINT_FLAGS_NO_RECORDED_VALUE_MASK" => Some(Self::NoRecordedValueMask),
            _ => None,
        }
    }
}
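// Illustrative sketch (not part of the prost-generated output): testing a
// DataPointFlags bit on a point's `flags` field, mirroring the bit-mask
// expression shown in the DataPointFlags documentation above. The helper
// function name is hypothetical.
#[cfg(test)]
mod data_point_flags_example {
    use super::*;

    fn has_no_recorded_value(flags: u32) -> bool {
        let mask = DataPointFlags::NoRecordedValueMask as u32;
        (flags & mask) == mask
    }

    #[test]
    fn no_recorded_value_bit() {
        let point = NumberDataPoint {
            flags: DataPointFlags::NoRecordedValueMask as u32,
            ..Default::default()
        };
        assert!(has_no_recorded_value(point.flags));
        assert!(!has_no_recorded_value(0));
    }
}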