// hyper/client/conn/http1.rs
1//! HTTP/1 client connections
2
3use std::error::Error as StdError;
4use std::fmt;
5use std::future::Future;
6use std::pin::Pin;
7use std::task::{Context, Poll};
8
9use crate::rt::{Read, Write};
10use bytes::Bytes;
11use futures_util::ready;
12use http::{Request, Response};
13use httparse::ParserConfig;
14
15use super::super::dispatch::{self, TrySendError};
16use crate::body::{Body, Incoming as IncomingBody};
17use crate::proto;
18
// Shorthand for the HTTP/1 client-side dispatcher: drives client
// transaction semantics for body type `B` over the IO object `T`.
type Dispatcher<T, B> =
    proto::dispatch::Dispatcher<proto::dispatch::Client<B>, B, T, proto::h1::ClientTransaction>;
21
/// The sender side of an established connection.
pub struct SendRequest<B> {
    // Channel to the connection task: each successful `send`/`try_send`
    // yields a receiver that resolves to the `Response` (or an error).
    dispatch: dispatch::Sender<Request<B>, Response<IncomingBody>>,
}
26
/// Deconstructed parts of a `Connection`.
///
/// This allows taking apart a `Connection` at a later time, in order to
/// reclaim the IO object, and additional related pieces.
///
/// Returned by `Connection::into_parts`.
#[derive(Debug)]
#[non_exhaustive]
pub struct Parts<T> {
    /// The original IO object used in the handshake.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// For instance, if the `Connection` is used for an HTTP upgrade request,
    /// it is possible the server sent back the first bytes of the new protocol
    /// along with the response upgrade.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
}
46
/// A future that processes all HTTP state for the IO object.
///
/// In most cases, this should just be spawned into an executor, so that it
/// can process incoming and outgoing messages, notice hangups, and the like.
///
/// Instances of this type are typically created via the [`handshake`] function
#[must_use = "futures do nothing unless polled"]
pub struct Connection<T, B>
where
    T: Read + Write,
    B: Body + 'static,
{
    // Protocol state machine driving HTTP/1 over `T`; requests arrive
    // through the channel held by the paired `SendRequest`.
    inner: Dispatcher<T, B>,
}
61
impl<T, B> Connection<T, B>
where
    T: Read + Write + Unpin,
    B: Body + 'static,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    /// Return the inner IO object, and additional information.
    ///
    /// Only works for HTTP/1 connections. HTTP/2 connections will panic.
    pub fn into_parts(self) -> Parts<T> {
        // The third element of `into_inner` is discarded: once the
        // connection is torn apart it is no longer needed here.
        let (io, read_buf, _) = self.inner.into_inner();
        Parts { io, read_buf }
    }

    /// Poll the connection for completion, but without calling `shutdown`
    /// on the underlying IO.
    ///
    /// This is useful to allow running a connection while doing an HTTP
    /// upgrade. Once the upgrade is completed, the connection would be "done",
    /// but it is not desired to actually shutdown the IO object. Instead you
    /// would take it back using `into_parts`.
    ///
    /// Use a `poll_fn` helper (for example [`std::future::poll_fn`]) to
    /// drive this method to completion, or use the `without_shutdown`
    /// convenience wrapper.
    pub fn poll_without_shutdown(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
        self.inner.poll_without_shutdown(cx)
    }

    /// Prevent shutdown of the underlying IO object at the end of service the request,
    /// instead run `into_parts`. This is a convenience wrapper over `poll_without_shutdown`.
    pub async fn without_shutdown(self) -> crate::Result<Parts<T>> {
        // Held in an `Option` so the closure can poll by mutable reference
        // and then move the connection out when it completes.
        let mut conn = Some(self);
        futures_util::future::poll_fn(move |cx| -> Poll<crate::Result<Parts<T>>> {
            // `unwrap` is fine: `conn` is only taken immediately before
            // returning `Ready` below, so it is `Some` whenever polled.
            ready!(conn.as_mut().unwrap().poll_without_shutdown(cx))?;
            Poll::Ready(Ok(conn.take().unwrap().into_parts()))
        })
        .await
    }
}
102
/// A builder to configure an HTTP connection.
///
/// After setting options, the builder is used to create a handshake future.
///
/// **Note**: The default values of options are *not considered stable*. They
/// are subject to change at any time.
#[derive(Clone, Debug)]
pub struct Builder {
    // Tolerate HTTP/0.9 responses (see `http09_responses`).
    h09_responses: bool,
    // Lenient parsing options forwarded to `httparse` (header spacing,
    // obsolete folding, invalid-header tolerance).
    h1_parser_config: ParserConfig,
    // `None` = auto-detect write strategy; `Some(true)` = queued/vectored,
    // `Some(false)` = flatten into one buffer.
    h1_writev: Option<bool>,
    // Write outgoing header names in Title-Case.
    h1_title_case_headers: bool,
    // Record/replay original header casing via a private extension.
    h1_preserve_header_case: bool,
    // `None` = default header limit (see `max_headers`).
    h1_max_headers: Option<usize>,
    // Record/replay original header ordering via a private extension.
    h1_preserve_header_order: bool,
    // Fixed read-buffer size; mutually exclusive with `h1_max_buf_size`.
    h1_read_buf_exact_size: Option<usize>,
    // Upper bound for the adaptive read buffer; mutually exclusive with
    // `h1_read_buf_exact_size`.
    h1_max_buf_size: Option<usize>,
}
121
122/// Returns a handshake future over some IO.
123///
124/// This is a shortcut for `Builder::new().handshake(io)`.
125/// See [`client::conn`](crate::client::conn) for more.
126pub async fn handshake<T, B>(io: T) -> crate::Result<(SendRequest<B>, Connection<T, B>)>
127where
128 T: Read + Write + Unpin,
129 B: Body + 'static,
130 B::Data: Send,
131 B::Error: Into<Box<dyn StdError + Send + Sync>>,
132{
133 Builder::new().handshake(io).await
134}
135
136// ===== impl SendRequest
137
138impl<B> SendRequest<B> {
139 /// Polls to determine whether this sender can be used yet for a request.
140 ///
141 /// If the associated connection is closed, this returns an Error.
142 pub fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<crate::Result<()>> {
143 self.dispatch.poll_ready(cx)
144 }
145
146 /// Waits until the dispatcher is ready
147 ///
148 /// If the associated connection is closed, this returns an Error.
149 pub async fn ready(&mut self) -> crate::Result<()> {
150 futures_util::future::poll_fn(|cx| self.poll_ready(cx)).await
151 }
152
153 /// Checks if the connection is currently ready to send a request.
154 ///
155 /// # Note
156 ///
157 /// This is mostly a hint. Due to inherent latency of networks, it is
158 /// possible that even after checking this is ready, sending a request
159 /// may still fail because the connection was closed in the meantime.
160 pub fn is_ready(&self) -> bool {
161 self.dispatch.is_ready()
162 }
163
164 /// Checks if the connection side has been closed.
165 pub fn is_closed(&self) -> bool {
166 self.dispatch.is_closed()
167 }
168}
169
170impl<B> SendRequest<B>
171where
172 B: Body + 'static,
173{
174 /// Sends a `Request` on the associated connection.
175 ///
176 /// Returns a future that if successful, yields the `Response`.
177 ///
178 /// `req` must have a `Host` header.
179 ///
180 /// # Uri
181 ///
182 /// The `Uri` of the request is serialized as-is.
183 ///
184 /// - Usually you want origin-form (`/path?query`).
185 /// - For sending to an HTTP proxy, you want to send in absolute-form
186 /// (`https://hyper.rs/guides`).
187 ///
188 /// This is however not enforced or validated and it is up to the user
189 /// of this method to ensure the `Uri` is correct for their intended purpose.
190 pub fn send_request(
191 &mut self,
192 req: Request<B>,
193 ) -> impl Future<Output = crate::Result<Response<IncomingBody>>> {
194 let sent = self.dispatch.send(req);
195
196 async move {
197 match sent {
198 Ok(rx) => match rx.await {
199 Ok(Ok(resp)) => Ok(resp),
200 Ok(Err(err)) => Err(err),
201 // this is definite bug if it happens, but it shouldn't happen!
202 Err(_canceled) => panic!("dispatch dropped without returning error"),
203 },
204 Err(_req) => {
205 debug!("connection was not ready");
206 Err(crate::Error::new_canceled().with("connection was not ready"))
207 }
208 }
209 }
210 }
211
212 /// Sends a `Request` on the associated connection.
213 ///
214 /// Returns a future that if successful, yields the `Response`.
215 ///
216 /// # Error
217 ///
218 /// If there was an error before trying to serialize the request to the
219 /// connection, the message will be returned as part of this error.
220 pub fn try_send_request(
221 &mut self,
222 req: Request<B>,
223 ) -> impl Future<Output = Result<Response<IncomingBody>, TrySendError<Request<B>>>> {
224 let sent = self.dispatch.try_send(req);
225 async move {
226 match sent {
227 Ok(rx) => match rx.await {
228 Ok(Ok(res)) => Ok(res),
229 Ok(Err(err)) => Err(err),
230 // this is definite bug if it happens, but it shouldn't happen!
231 Err(_) => panic!("dispatch dropped without returning error"),
232 },
233 Err(req) => {
234 debug!("connection was not ready");
235 let error = crate::Error::new_canceled().with("connection was not ready");
236 Err(TrySendError {
237 error,
238 message: Some(req),
239 })
240 }
241 }
242 }
243 }
244}
245
246impl<B> fmt::Debug for SendRequest<B> {
247 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
248 f.debug_struct("SendRequest").finish()
249 }
250}
251
252// ===== impl Connection
253
254impl<T, B> Connection<T, B>
255where
256 T: Read + Write + Unpin + Send,
257 B: Body + 'static,
258 B::Error: Into<Box<dyn StdError + Send + Sync>>,
259{
260 /// Enable this connection to support higher-level HTTP upgrades.
261 ///
262 /// See [the `upgrade` module](crate::upgrade) for more.
263 pub fn with_upgrades(self) -> upgrades::UpgradeableConnection<T, B> {
264 upgrades::UpgradeableConnection { inner: Some(self) }
265 }
266}
267
268impl<T, B> fmt::Debug for Connection<T, B>
269where
270 T: Read + Write + fmt::Debug,
271 B: Body + 'static,
272{
273 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
274 f.debug_struct("Connection").finish()
275 }
276}
277
278impl<T, B> Future for Connection<T, B>
279where
280 T: Read + Write + Unpin,
281 B: Body + 'static,
282 B::Data: Send,
283 B::Error: Into<Box<dyn StdError + Send + Sync>>,
284{
285 type Output = crate::Result<()>;
286
287 fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
288 match ready!(Pin::new(&mut self.inner).poll(cx))? {
289 proto::Dispatched::Shutdown => Poll::Ready(Ok(())),
290 proto::Dispatched::Upgrade(pending) => {
291 // With no `Send` bound on `I`, we can't try to do
292 // upgrades here. In case a user was trying to use
293 // `upgrade` with this API, send a special
294 // error letting them know about that.
295 pending.manual();
296 Poll::Ready(Ok(()))
297 }
298 }
299 }
300}
301
302// ===== impl Builder
303
impl Builder {
    /// Creates a new connection builder.
    #[inline]
    pub fn new() -> Builder {
        Builder {
            h09_responses: false,
            h1_writev: None,
            h1_read_buf_exact_size: None,
            h1_parser_config: Default::default(),
            h1_title_case_headers: false,
            h1_preserve_header_case: false,
            h1_max_headers: None,
            h1_preserve_header_order: false,
            h1_max_buf_size: None,
        }
    }

    /// Set whether HTTP/0.9 responses should be tolerated.
    ///
    /// Default is false.
    pub fn http09_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h09_responses = enabled;
        self
    }

    /// Set whether HTTP/1 connections will accept spaces between header names
    /// and the colon that follow them in responses.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
    /// to say about it:
    ///
    /// > No whitespace is allowed between the header field-name and colon. In
    /// > the past, differences in the handling of such whitespace have led to
    /// > security vulnerabilities in request routing and response handling. A
    /// > server MUST reject any received request message that contains
    /// > whitespace between a header field-name and colon with a response code
    /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a
    /// > response message before forwarding the message downstream.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    pub fn allow_spaces_after_header_name_in_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .allow_spaces_after_header_name_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections will accept obsolete line folding for
    /// header values.
    ///
    /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when
    /// parsing.
    ///
    /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has
    /// to say about it:
    ///
    /// > A server that receives an obs-fold in a request message that is not
    /// > within a message/http container MUST either reject the message by
    /// > sending a 400 (Bad Request), preferably with a representation
    /// > explaining that obsolete line folding is unacceptable, or replace
    /// > each received obs-fold with one or more SP octets prior to
    /// > interpreting the field value or forwarding the message downstream.
    ///
    /// > A proxy or gateway that receives an obs-fold in a response message
    /// > that is not within a message/http container MUST either discard the
    /// > message and replace it with a 502 (Bad Gateway) response, preferably
    /// > with a representation explaining that unacceptable line folding was
    /// > received, or replace each received obs-fold with one or more SP
    /// > octets prior to interpreting the field value or forwarding the
    /// > message downstream.
    ///
    /// > A user agent that receives an obs-fold in a response message that is
    /// > not within a message/http container MUST replace each received
    /// > obs-fold with one or more SP octets prior to interpreting the field
    /// > value.
    ///
    /// Default is false.
    ///
    /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4
    pub fn allow_obsolete_multiline_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .allow_obsolete_multiline_headers_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections will silently ignore malformed header lines.
    ///
    /// If this is enabled and a header line does not start with a valid header
    /// name, or does not include a colon at all, the line will be silently ignored
    /// and no error will be reported.
    ///
    /// Default is false.
    pub fn ignore_invalid_headers_in_responses(&mut self, enabled: bool) -> &mut Builder {
        self.h1_parser_config
            .ignore_invalid_headers_in_responses(enabled);
        self
    }

    /// Set whether HTTP/1 connections should try to use vectored writes,
    /// or always flatten into a single buffer.
    ///
    /// Note that setting this to false may mean more copies of body data,
    /// but may also improve performance when an IO transport doesn't
    /// support vectored writes well, such as most TLS implementations.
    ///
    /// Setting this to true will force hyper to use queued strategy
    /// which may eliminate unnecessary cloning on some TLS backends
    ///
    /// Default is `auto`. In this mode hyper will try to guess which
    /// mode to use
    pub fn writev(&mut self, enabled: bool) -> &mut Builder {
        self.h1_writev = Some(enabled);
        self
    }

    /// Set whether HTTP/1 connections will write header names as title case at
    /// the socket level.
    ///
    /// Default is false.
    pub fn title_case_headers(&mut self, enabled: bool) -> &mut Builder {
        self.h1_title_case_headers = enabled;
        self
    }

    /// Set whether to support preserving original header cases.
    ///
    /// Currently, this will record the original cases received, and store them
    /// in a private extension on the `Response`. It will also look for and use
    /// such an extension in any provided `Request`.
    ///
    /// Since the relevant extension is still private, there is no way to
    /// interact with the original cases. The only effect this can have now is
    /// to forward the cases in a proxy-like fashion.
    ///
    /// Default is false.
    pub fn preserve_header_case(&mut self, enabled: bool) -> &mut Builder {
        self.h1_preserve_header_case = enabled;
        self
    }

    /// Set the maximum number of headers.
    ///
    /// When a response is received, the parser will reserve a buffer to store headers for optimal
    /// performance.
    ///
    /// If client receives more headers than the buffer size, the error "message header too large"
    /// is returned.
    ///
    /// Note that headers is allocated on the stack by default, which has higher performance. After
    /// setting this value, headers will be allocated in heap memory, that is, heap memory
    /// allocation will occur for each response, and there will be a performance drop of about 5%.
    ///
    /// Default is 100.
    pub fn max_headers(&mut self, val: usize) -> &mut Self {
        self.h1_max_headers = Some(val);
        self
    }

    /// Set whether to support preserving original header order.
    ///
    /// Currently, this will record the order in which headers are received, and store this
    /// ordering in a private extension on the `Response`. It will also look for and use
    /// such an extension in any provided `Request`.
    ///
    /// Default is false.
    pub fn preserve_header_order(&mut self, enabled: bool) -> &mut Builder {
        self.h1_preserve_header_order = enabled;
        self
    }

    /// Sets the exact size of the read buffer to *always* use.
    ///
    /// Note that setting this option unsets the `max_buf_size` option.
    ///
    /// Default is an adaptive read buffer.
    pub fn read_buf_exact_size(&mut self, sz: Option<usize>) -> &mut Builder {
        self.h1_read_buf_exact_size = sz;
        // The two buffer-sizing modes are mutually exclusive.
        self.h1_max_buf_size = None;
        self
    }

    /// Set the maximum buffer size for the connection.
    ///
    /// Default is ~400kb.
    ///
    /// Note that setting this option unsets the `read_exact_buf_size` option.
    ///
    /// # Panics
    ///
    /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum.
    pub fn max_buf_size(&mut self, max: usize) -> &mut Self {
        assert!(
            max >= proto::h1::MINIMUM_MAX_BUFFER_SIZE,
            "the max_buf_size cannot be smaller than the minimum that h1 specifies."
        );

        self.h1_max_buf_size = Some(max);
        // The two buffer-sizing modes are mutually exclusive.
        self.h1_read_buf_exact_size = None;
        self
    }

    /// Constructs a connection with the configured options and IO.
    /// See [`client::conn`](crate::client::conn) for more.
    ///
    /// Note, if [`Connection`] is not `await`-ed, [`SendRequest`] will
    /// do nothing.
    pub fn handshake<T, B>(
        &self,
        io: T,
    ) -> impl Future<Output = crate::Result<(SendRequest<B>, Connection<T, B>)>>
    where
        T: Read + Write + Unpin,
        B: Body + 'static,
        B::Data: Send,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        // Snapshot the options so the returned future is independent of
        // any later mutation of this builder.
        let opts = self.clone();

        async move {
            trace!("client handshake HTTP/1");

            let (tx, rx) = dispatch::channel();
            let mut conn = proto::Conn::new(io);
            // Apply every configured option to the connection before it is
            // handed to the dispatcher.
            conn.set_h1_parser_config(opts.h1_parser_config);
            if let Some(writev) = opts.h1_writev {
                if writev {
                    conn.set_write_strategy_queue();
                } else {
                    conn.set_write_strategy_flatten();
                }
            }
            if opts.h1_title_case_headers {
                conn.set_title_case_headers();
            }
            if opts.h1_preserve_header_case {
                conn.set_preserve_header_case();
            }
            if let Some(max_headers) = opts.h1_max_headers {
                conn.set_http1_max_headers(max_headers);
            }
            if opts.h1_preserve_header_order {
                conn.set_preserve_header_order();
            }

            if opts.h09_responses {
                conn.set_h09_responses();
            }

            if let Some(sz) = opts.h1_read_buf_exact_size {
                conn.set_read_buf_exact_size(sz);
            }
            if let Some(max) = opts.h1_max_buf_size {
                conn.set_max_buf_size(max);
            }
            // Wire the request-receiving half of the channel into the
            // dispatcher that will drive the connection.
            let cd = proto::h1::dispatch::Client::new(rx);
            let proto = proto::h1::Dispatcher::new(cd, conn);

            Ok((SendRequest { dispatch: tx }, Connection { inner: proto }))
        }
    }
}
566
mod upgrades {
    use crate::upgrade::Upgraded;

    use super::*;

    // A future binding a connection with a Service with Upgrade support.
    //
    // This type is unnameable outside the crate.
    #[must_use = "futures do nothing unless polled"]
    #[allow(missing_debug_implementations)]
    pub struct UpgradeableConnection<T, B>
    where
        T: Read + Write + Unpin + Send + 'static,
        B: Body + 'static,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        // `Some` until an upgrade completes; taken in `poll` so the IO
        // object can be moved out via `into_parts`.
        pub(super) inner: Option<Connection<T, B>>,
    }

    impl<I, B> Future for UpgradeableConnection<I, B>
    where
        I: Read + Write + Unpin + Send + 'static,
        B: Body + 'static,
        B::Data: Send,
        B::Error: Into<Box<dyn StdError + Send + Sync>>,
    {
        type Output = crate::Result<()>;

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            // `unwrap` relies on the standard `Future` contract: `inner` is
            // only taken in the `Upgrade` arm below, which returns `Ready`,
            // and a future must not be polled again after `Ready`.
            match ready!(Pin::new(&mut self.inner.as_mut().unwrap().inner).poll(cx)) {
                Ok(proto::Dispatched::Shutdown) => Poll::Ready(Ok(())),
                Ok(proto::Dispatched::Upgrade(pending)) => {
                    // Tear the connection apart and hand the raw IO plus any
                    // already-buffered bytes to whoever awaits the upgrade.
                    let Parts { io, read_buf } = self.inner.take().unwrap().into_parts();
                    pending.fulfill(Upgraded::new(io, read_buf));
                    Poll::Ready(Ok(()))
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        }
    }
}