xds_api/generated/envoy.service.load_stats.v3.rs
// This file is @generated by prost-build.
/// A load report Envoy sends to the management server.
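///
/// A minimal construction sketch (not generated), assuming the generated
/// `Node` message from `envoy.config.core.v3` is in scope; the node id is
/// illustrative and, as is typical for the first request on a stream,
/// `cluster_stats` is left empty:
///
/// ```ignore
/// let request = LoadStatsRequest {
///     // Identify this client to the management server.
///     node: Some(Node {
///         id: "my-envoy-node".to_string(),
///         ..Default::default()
///     }),
///     // Per-cluster stats; empty on the very first request of a stream.
///     cluster_stats: Vec::new(),
/// };
/// ```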
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LoadStatsRequest {
    /// Node identifier for Envoy instance.
    #[prost(message, optional, tag = "1")]
    pub node: ::core::option::Option<super::super::super::config::core::v3::Node>,
    /// A list of load stats to report.
    #[prost(message, repeated, tag = "2")]
    pub cluster_stats: ::prost::alloc::vec::Vec<
        super::super::super::config::endpoint::v3::ClusterStats,
    >,
}
impl ::prost::Name for LoadStatsRequest {
    const NAME: &'static str = "LoadStatsRequest";
    const PACKAGE: &'static str = "envoy.service.load_stats.v3";
    fn full_name() -> ::prost::alloc::string::String {
        "envoy.service.load_stats.v3.LoadStatsRequest".into()
    }
    fn type_url() -> ::prost::alloc::string::String {
        "type.googleapis.com/envoy.service.load_stats.v3.LoadStatsRequest".into()
    }
}
/// The management server sends envoy a LoadStatsResponse with all clusters it
/// is interested in learning load stats about.
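///
/// A minimal sketch (not generated) of a response a management server might
/// send, assuming the generated `google.protobuf.Duration` message is in
/// scope as `Duration`; the cluster name and 10-second interval are
/// illustrative:
///
/// ```ignore
/// let response = LoadStatsResponse {
///     // Only these clusters are reported on (ignored if send_all_clusters is set).
///     clusters: vec!["backend".to_string()],
///     send_all_clusters: false,
///     // Collect stats over windows of at least 10 seconds.
///     load_reporting_interval: Some(Duration { seconds: 10, nanos: 0 }),
///     report_endpoint_granularity: false,
/// };
/// ```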
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LoadStatsResponse {
    /// Clusters to report stats for.
    /// Not populated if ``send_all_clusters`` is true.
    #[prost(string, repeated, tag = "1")]
    pub clusters: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
    /// If true, the client should send all clusters it knows about.
    /// Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their
    /// :ref:`client_features<envoy_v3_api_field_config.core.v3.Node.client_features>` field will honor this field.
    #[prost(bool, tag = "4")]
    pub send_all_clusters: bool,
    /// The minimum interval of time to collect stats over. This is only a minimum for two reasons:
    ///
    /// 1. There may be some delay from when the timer fires until stats sampling occurs.
    /// 2. For clusters that were already featured in the previous ``LoadStatsResponse``, any traffic
    ///    that is observed in between the corresponding previous ``LoadStatsRequest`` and this
    ///    ``LoadStatsResponse`` will also be accumulated and billed to the cluster. This avoids a period
    ///    of unobservability that might otherwise exist between the messages. New clusters are not
    ///    subject to this consideration.
    #[prost(message, optional, tag = "2")]
    pub load_reporting_interval: ::core::option::Option<
        super::super::super::super::google::protobuf::Duration,
    >,
    /// Set to ``true`` if the management server supports endpoint granularity
    /// report.
    #[prost(bool, tag = "3")]
    pub report_endpoint_granularity: bool,
}
impl ::prost::Name for LoadStatsResponse {
    const NAME: &'static str = "LoadStatsResponse";
    const PACKAGE: &'static str = "envoy.service.load_stats.v3";
    fn full_name() -> ::prost::alloc::string::String {
        "envoy.service.load_stats.v3.LoadStatsResponse".into()
    }
    fn type_url() -> ::prost::alloc::string::String {
        "type.googleapis.com/envoy.service.load_stats.v3.LoadStatsResponse".into()
    }
}
/// Generated client implementations.
pub mod load_reporting_service_client {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    use tonic::codegen::http::Uri;
    #[derive(Debug, Clone)]
    pub struct LoadReportingServiceClient<T> {
        inner: tonic::client::Grpc<T>,
    }
    impl<T> LoadReportingServiceClient<T>
    where
        T: tonic::client::GrpcService<tonic::body::BoxBody>,
        T::Error: Into<StdError>,
        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
    {
        pub fn new(inner: T) -> Self {
            let inner = tonic::client::Grpc::new(inner);
            Self { inner }
        }
        pub fn with_origin(inner: T, origin: Uri) -> Self {
            let inner = tonic::client::Grpc::with_origin(inner, origin);
            Self { inner }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> LoadReportingServiceClient<InterceptedService<T, F>>
        where
            F: tonic::service::Interceptor,
            T::ResponseBody: Default,
            T: tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
                Response = http::Response<
                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
                >,
            >,
            <T as tonic::codegen::Service<
                http::Request<tonic::body::BoxBody>,
            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
        {
            LoadReportingServiceClient::new(InterceptedService::new(inner, interceptor))
        }
        /// Compress requests with the given encoding.
        ///
        /// This requires the server to support it otherwise it might respond with an
        /// error.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.send_compressed(encoding);
            self
        }
        /// Enable decompressing responses.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.inner = self.inner.accept_compressed(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_decoding_message_size(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.inner = self.inner.max_encoding_message_size(limit);
            self
        }
        /// Advanced API to allow for multi-dimensional load balancing by remote
        /// server. For receiving LB assignments, the steps are:
        /// 1. The management server is configured with per cluster/zone/load metric
        ///    capacity configuration. The capacity configuration definition is
        ///    outside of the scope of this document.
        /// 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
        ///    to balance.
        ///
        /// Independently, Envoy will initiate a StreamLoadStats bidi stream with a
        /// management server:
        /// 1. Once a connection is established, the management server publishes a
        ///    LoadStatsResponse for all clusters it is interested in learning load
        ///    stats about.
        /// 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
        ///    based on per-zone weights and/or per-instance weights (if specified)
        ///    based on intra-zone LbPolicy. This information comes from the above
        ///    {Stream,Fetch}Endpoints.
        /// 3. When upstream hosts reply, they optionally add header <define header
        ///    name> with ASCII representation of EndpointLoadMetricStats.
        /// 4. Envoy aggregates load reports over the period of time given to it in
        ///    LoadStatsResponse.load_reporting_interval. This includes aggregation
        ///    stats Envoy maintains by itself (total_requests, rpc_errors, etc.) as
        ///    well as load metrics from upstream hosts.
        /// 5. When the timer of load_reporting_interval expires, Envoy sends a new
        ///    LoadStatsRequest filled with load reports for each cluster.
        /// 6. The management server uses the load reports from all reporting Envoys
        ///    around the world, computes the global assignment and prepares the traffic
        ///    assignment destined for each zone Envoys are located in. Go to 2.
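        ///
        /// A minimal calling sketch (not generated), assuming tonic's `transport`
        /// feature and the `tokio_stream` crate are available, that the message
        /// types from the parent module are in scope, and that a management server
        /// listens at `http://localhost:18000`; the address and the single default
        /// request are illustrative:
        ///
        /// ```ignore
        /// // Connect a channel and wrap it in the generated client.
        /// let channel = tonic::transport::Channel::from_static("http://localhost:18000")
        ///     .connect()
        ///     .await?;
        /// let mut client = LoadReportingServiceClient::new(channel);
        ///
        /// // The request side of the bidi stream: here, one initial (empty) report.
        /// let outbound = tokio_stream::iter(vec![LoadStatsRequest::default()]);
        ///
        /// // Read LoadStatsResponse messages as the management server sends them.
        /// let mut inbound = client.stream_load_stats(outbound).await?.into_inner();
        /// while let Some(response) = inbound.message().await? {
        ///     println!("clusters to report: {:?}", response.clusters);
        /// }
        /// ```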
        pub async fn stream_load_stats(
            &mut self,
            request: impl tonic::IntoStreamingRequest<Message = super::LoadStatsRequest>,
        ) -> std::result::Result<
            tonic::Response<tonic::codec::Streaming<super::LoadStatsResponse>>,
            tonic::Status,
        > {
            self.inner
                .ready()
                .await
                .map_err(|e| {
                    tonic::Status::unknown(
                        format!("Service was not ready: {}", e.into()),
                    )
                })?;
            let codec = tonic::codec::ProstCodec::default();
            let path = http::uri::PathAndQuery::from_static(
                "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats",
            );
            let mut req = request.into_streaming_request();
            req.extensions_mut()
                .insert(
                    GrpcMethod::new(
                        "envoy.service.load_stats.v3.LoadReportingService",
                        "StreamLoadStats",
                    ),
                );
            self.inner.streaming(req, path, codec).await
        }
    }
}
/// Generated server implementations.
pub mod load_reporting_service_server {
    #![allow(
        unused_variables,
        dead_code,
        missing_docs,
        clippy::wildcard_imports,
        clippy::let_unit_value,
    )]
    use tonic::codegen::*;
    /// Generated trait containing gRPC methods that should be implemented for use with LoadReportingServiceServer.
    #[async_trait]
    pub trait LoadReportingService: std::marker::Send + std::marker::Sync + 'static {
        /// Server streaming response type for the StreamLoadStats method.
        type StreamLoadStatsStream: tonic::codegen::tokio_stream::Stream<
                Item = std::result::Result<super::LoadStatsResponse, tonic::Status>,
            >
            + std::marker::Send
            + 'static;
        /// Advanced API to allow for multi-dimensional load balancing by remote
        /// server. For receiving LB assignments, the steps are:
        /// 1. The management server is configured with per cluster/zone/load metric
        ///    capacity configuration. The capacity configuration definition is
        ///    outside of the scope of this document.
        /// 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
        ///    to balance.
        ///
        /// Independently, Envoy will initiate a StreamLoadStats bidi stream with a
        /// management server:
        /// 1. Once a connection is established, the management server publishes a
        ///    LoadStatsResponse for all clusters it is interested in learning load
        ///    stats about.
        /// 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
        ///    based on per-zone weights and/or per-instance weights (if specified)
        ///    based on intra-zone LbPolicy. This information comes from the above
        ///    {Stream,Fetch}Endpoints.
        /// 3. When upstream hosts reply, they optionally add header <define header
        ///    name> with ASCII representation of EndpointLoadMetricStats.
        /// 4. Envoy aggregates load reports over the period of time given to it in
        ///    LoadStatsResponse.load_reporting_interval. This includes aggregation
        ///    stats Envoy maintains by itself (total_requests, rpc_errors, etc.) as
        ///    well as load metrics from upstream hosts.
        /// 5. When the timer of load_reporting_interval expires, Envoy sends a new
        ///    LoadStatsRequest filled with load reports for each cluster.
        /// 6. The management server uses the load reports from all reporting Envoys
        ///    around the world, computes the global assignment and prepares the traffic
        ///    assignment destined for each zone Envoys are located in. Go to 2.
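        ///
        /// A minimal implementation sketch (not generated), assuming the
        /// `tokio_stream` crate and that the message types from the parent module
        /// are in scope; `MyLoadReporting` and the single-response behaviour are
        /// illustrative only:
        ///
        /// ```ignore
        /// struct MyLoadReporting;
        ///
        /// #[tonic::async_trait]
        /// impl LoadReportingService for MyLoadReporting {
        ///     // Box the outbound stream so any concrete stream type can be returned.
        ///     type StreamLoadStatsStream = std::pin::Pin<
        ///         Box<dyn tokio_stream::Stream<Item = Result<LoadStatsResponse, tonic::Status>> + Send>,
        ///     >;
        ///
        ///     async fn stream_load_stats(
        ///         &self,
        ///         _request: tonic::Request<tonic::Streaming<LoadStatsRequest>>,
        ///     ) -> Result<tonic::Response<Self::StreamLoadStatsStream>, tonic::Status> {
        ///         // A real server would keep answering as reports arrive; this sketch
        ///         // sends a single "report all clusters" response and then ends.
        ///         let response = LoadStatsResponse {
        ///             send_all_clusters: true,
        ///             ..Default::default()
        ///         };
        ///         Ok(tonic::Response::new(Box::pin(tokio_stream::once(Ok(response)))))
        ///     }
        /// }
        /// ```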
        async fn stream_load_stats(
            &self,
            request: tonic::Request<tonic::Streaming<super::LoadStatsRequest>>,
        ) -> std::result::Result<
            tonic::Response<Self::StreamLoadStatsStream>,
            tonic::Status,
        >;
    }
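    /// A minimal serving sketch (not generated), assuming tonic's `transport`
    /// feature and the illustrative `MyLoadReporting` implementation from the
    /// trait docs above:
    ///
    /// ```ignore
    /// let addr = "0.0.0.0:18000".parse()?;
    /// tonic::transport::Server::builder()
    ///     // Wrap the trait implementation in the generated gRPC service.
    ///     .add_service(LoadReportingServiceServer::new(MyLoadReporting))
    ///     .serve(addr)
    ///     .await?;
    /// ```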
    #[derive(Debug)]
    pub struct LoadReportingServiceServer<T> {
        inner: Arc<T>,
        accept_compression_encodings: EnabledCompressionEncodings,
        send_compression_encodings: EnabledCompressionEncodings,
        max_decoding_message_size: Option<usize>,
        max_encoding_message_size: Option<usize>,
    }
    impl<T> LoadReportingServiceServer<T> {
        pub fn new(inner: T) -> Self {
            Self::from_arc(Arc::new(inner))
        }
        pub fn from_arc(inner: Arc<T>) -> Self {
            Self {
                inner,
                accept_compression_encodings: Default::default(),
                send_compression_encodings: Default::default(),
                max_decoding_message_size: None,
                max_encoding_message_size: None,
            }
        }
        pub fn with_interceptor<F>(
            inner: T,
            interceptor: F,
        ) -> InterceptedService<Self, F>
        where
            F: tonic::service::Interceptor,
        {
            InterceptedService::new(Self::new(inner), interceptor)
        }
        /// Enable decompressing requests with the given encoding.
        #[must_use]
        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.accept_compression_encodings.enable(encoding);
            self
        }
        /// Compress responses with the given encoding, if the client supports it.
        #[must_use]
        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
            self.send_compression_encodings.enable(encoding);
            self
        }
        /// Limits the maximum size of a decoded message.
        ///
        /// Default: `4MB`
        #[must_use]
        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
            self.max_decoding_message_size = Some(limit);
            self
        }
        /// Limits the maximum size of an encoded message.
        ///
        /// Default: `usize::MAX`
        #[must_use]
        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
            self.max_encoding_message_size = Some(limit);
            self
        }
    }
    impl<T, B> tonic::codegen::Service<http::Request<B>>
    for LoadReportingServiceServer<T>
    where
        T: LoadReportingService,
        B: Body + std::marker::Send + 'static,
        B::Error: Into<StdError> + std::marker::Send + 'static,
    {
        type Response = http::Response<tonic::body::BoxBody>;
        type Error = std::convert::Infallible;
        type Future = BoxFuture<Self::Response, Self::Error>;
        fn poll_ready(
            &mut self,
            _cx: &mut Context<'_>,
        ) -> Poll<std::result::Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, req: http::Request<B>) -> Self::Future {
            match req.uri().path() {
                "/envoy.service.load_stats.v3.LoadReportingService/StreamLoadStats" => {
                    #[allow(non_camel_case_types)]
                    struct StreamLoadStatsSvc<T: LoadReportingService>(pub Arc<T>);
                    impl<
                        T: LoadReportingService,
                    > tonic::server::StreamingService<super::LoadStatsRequest>
                    for StreamLoadStatsSvc<T> {
                        type Response = super::LoadStatsResponse;
                        type ResponseStream = T::StreamLoadStatsStream;
                        type Future = BoxFuture<
                            tonic::Response<Self::ResponseStream>,
                            tonic::Status,
                        >;
                        fn call(
                            &mut self,
                            request: tonic::Request<
                                tonic::Streaming<super::LoadStatsRequest>,
                            >,
                        ) -> Self::Future {
                            let inner = Arc::clone(&self.0);
                            let fut = async move {
                                <T as LoadReportingService>::stream_load_stats(
                                    &inner,
                                    request,
                                )
                                .await
                            };
                            Box::pin(fut)
                        }
                    }
                    let accept_compression_encodings = self.accept_compression_encodings;
                    let send_compression_encodings = self.send_compression_encodings;
                    let max_decoding_message_size = self.max_decoding_message_size;
                    let max_encoding_message_size = self.max_encoding_message_size;
                    let inner = self.inner.clone();
                    let fut = async move {
                        let method = StreamLoadStatsSvc(inner);
                        let codec = tonic::codec::ProstCodec::default();
                        let mut grpc = tonic::server::Grpc::new(codec)
                            .apply_compression_config(
                                accept_compression_encodings,
                                send_compression_encodings,
                            )
                            .apply_max_message_size_config(
                                max_decoding_message_size,
                                max_encoding_message_size,
                            );
                        let res = grpc.streaming(method, req).await;
                        Ok(res)
                    };
                    Box::pin(fut)
                }
                _ => {
                    Box::pin(async move {
                        let mut response = http::Response::new(empty_body());
                        let headers = response.headers_mut();
                        headers
                            .insert(
                                tonic::Status::GRPC_STATUS,
                                (tonic::Code::Unimplemented as i32).into(),
                            );
                        headers
                            .insert(
                                http::header::CONTENT_TYPE,
                                tonic::metadata::GRPC_CONTENT_TYPE,
                            );
                        Ok(response)
                    })
                }
            }
        }
    }
    impl<T> Clone for LoadReportingServiceServer<T> {
        fn clone(&self) -> Self {
            let inner = self.inner.clone();
            Self {
                inner,
                accept_compression_encodings: self.accept_compression_encodings,
                send_compression_encodings: self.send_compression_encodings,
                max_decoding_message_size: self.max_decoding_message_size,
                max_encoding_message_size: self.max_encoding_message_size,
            }
        }
    }
    /// Generated gRPC service name
    pub const SERVICE_NAME: &str = "envoy.service.load_stats.v3.LoadReportingService";
    impl<T> tonic::server::NamedService for LoadReportingServiceServer<T> {
        const NAME: &'static str = SERVICE_NAME;
    }
}