//! fastly 0.12.0
//!
//! Fastly Compute API
//! Documentation
use super::SendError;
use crate::{Backend, Request, Response};

pub mod handle;
pub use handle::{select_handles, PendingRequestHandle, PollHandleResult};

/// A handle to a pending asynchronous request returned by [`Request::send_async()`] or
/// [`Request::send_async_streaming()`].
///
/// A handle can be evaluated using [`PendingRequest::poll()`], [`PendingRequest::wait()`], or
/// [`select`]. It can also be discarded if the request was sent for effects it might have, and the
/// response is unimportant.
pub struct PendingRequest {
    /// The handle to the pending asynchronous request.
    handle: PendingRequestHandle,
    // Metadata below will be attached to the [`Response`] once the handle is finished.
    /// The backend the request was sent to; used when building the [`Response`] and when
    /// constructing a [`SendError`].
    pub(super) backend: Backend,
    // TODO 2024-07-31: this now forces request headers to be copied an extra
    // time, while it used to be ~free. Once async fn support is added and
    // pending request select is no longer present, we should be able to drop
    // this forced clone and let users clone if needed.
    /// A copy of the request that was sent (its original body is already streaming, so
    /// this copy carries no body); exposed via [`PendingRequest::sent_req()`] and
    /// attached to the eventual [`Response`].
    pub(super) sent_req: Request,
}

impl PendingRequest {
    /// Create a new pending request.
    ///
    /// Note that this constructor is *not* exposed in the public interface. Users should never
    /// directly invoke this constructor, and will receive a pending request by calling
    /// [`Request::send_async()`] or [`Request::send_async_streaming()`].
    pub(super) fn new(handle: PendingRequestHandle, backend: Backend, sent_req: Request) -> Self {
        Self {
            handle,
            backend,
            sent_req,
        }
    }

    /// Try to get the result of a pending request without blocking.
    ///
    /// This function returns immediately with a [`PollResult`]; if you want to block until a result
    /// is ready, use [`PendingRequest::wait()`].
    pub fn poll(self) -> PollResult {
        let Self {
            handle,
            sent_req,
            backend,
        } = self;
        match handle.copy().poll() {
            PollHandleResult::Pending(handle) => {
                PollResult::Pending(Self::new(handle, backend, sent_req))
            }
            PollHandleResult::Done(Ok((resp_handle, resp_body_handle))) => PollResult::Done(Ok(
                Response::from_backend_resp(resp_handle, resp_body_handle, backend, sent_req),
            )),
            PollHandleResult::Done(Err(e)) => {
                PollResult::Done(Err(SendError::new(backend.name(), sent_req, e)))
            }
        }
    }

    /// Block until the result of a pending request is ready.
    ///
    /// If you want check whether the result is ready without blocking, use
    /// [`PendingRequest::poll()`].
    pub fn wait(self) -> Result<Response, SendError> {
        let backend = self.backend;
        let sent_req = self.sent_req;
        match self.handle.copy().wait() {
            Ok((resp, body)) => Ok(Response::from_backend_resp(resp, body, backend, sent_req)),
            Err(e) => Err(SendError::new(backend.name(), sent_req, e)),
        }
    }

    /// Get a reference to the original [`Request`] associated with this pending request.
    ///
    /// Note that the request's original body is already sending, so the returned request does not
    /// have a body.
    pub fn sent_req(&self) -> &Request {
        &self.sent_req
    }
}

/// The result of a call to [`PendingRequest::poll()`].
// Ignoring this clippy lint: boxing the larger variant would be a breaking change to the
// public API.
#[allow(clippy::large_enum_variant)]
pub enum PollResult {
    /// The request is still in progress, and can be polled again.
    ///
    /// The contained [`PendingRequest`] replaces the one consumed by the call to `poll()`.
    Pending(PendingRequest),
    /// The request has either completed or errored.
    Done(Result<Response, SendError>),
}

/// Given a collection of [`PendingRequest`]s, block until the result of one of the requests is
/// ready.
///
/// This function accepts any type which can become an iterator that yields requests; a common
/// choice is `Vec<PendingRequest>`.
///
/// Returns a tuple `(result, remaining)`, where:
///
/// - `result` is the result of the request that became ready.
///
/// - `remaining` is a vector containing all of the requests that did not become ready. The order
///   of the requests in this vector is not guaranteed to match the order of the requests in the
///   argument collection.
///
/// ### Examples
///
/// **Selecting using the request URI**
///
/// You can use [`Response::get_backend_request()`] to inspect the request that a response came
/// from. This example uses the URL to see which of the two requests finished first:
///
/// ```no_run
/// use fastly::{Error, Request};
/// # fn f() -> Result<(), Error> { // Wrap the example in a function, so we can propagate errors.
///
/// // Send two asynchronous requests, and store the pending requests in a vector.
/// let req1 = Request::get("http://www.origin.org/meow")
///     .send_async("TheOrigin")?;
/// let req2 = Request::get("http://www.origin.org/woof")
///     .send_async("TheOrigin")?;
/// let pending_reqs = vec![req1, req2];
///
/// // Wait for one of the requests to finish.
/// let (resp, _remaining) = fastly::http::request::select(pending_reqs);
///
/// // Return an error if the request was not successful.
/// let resp = resp?;
///
/// // Inspect the response metadata to see which backend this response came from.
/// match resp
///     .get_backend_request()
///     .unwrap()
///     .get_url()
///     .path()
/// {
///     "/meow" => println!("I love cats!"),
///     "/woof" => println!("I love dogs!"),
///     _ => panic!("unexpected result"),
/// }
///
/// # Ok(())
/// # }
/// ```
///
/// **Selecting using the backend name**
///
/// You can also use [`Response::get_backend_name()`] to identify which pending request in the given
/// collection finished. Consider this example, where two requests are sent asynchronously to two
/// different backends:
///
/// ```no_run
/// use fastly::{Error, Request};
/// # fn f() -> Result<(), Error> { // Wrap the example in a function, so we can propagate errors.
///
/// // Send two asynchronous requests, and store the pending requests in a vector.
/// let req1 = Request::get("http://www.origin-1.org/")
///     .send_async("origin1")?;
/// let req2 = Request::get("http://www.origin-2.org/")
///     .send_async("origin2")?;
/// let pending_reqs = vec![req1, req2];
///
/// // Wait for one of the requests to finish.
/// let (resp, _remaining) = fastly::http::request::select(pending_reqs);
///
/// // Return an error if the request was not successful.
/// let resp = resp?;
///
/// // Inspect the response to see which backend this response came from.
/// match resp.get_backend_name().unwrap() {
///     "origin1" => println!("origin 1 responded first!"),
///     "origin2" => println!("origin 2 responded first!"),
///     _ => panic!("unexpected result"),
/// }
///
/// # Ok(())
/// # }
/// ```
///
/// ### Panics
///
/// Panics if the argument collection is empty, or contains more than
/// [`fastly_shared::MAX_PENDING_REQS`] requests.
pub fn select<I>(pending_reqs: I) -> (Result<Response, SendError>, Vec<PendingRequest>)
where
    I: IntoIterator<Item = PendingRequest>,
{
    // Materialize the iterator so we can both pass handle copies to the select hostcall
    // and hand the unfinished requests back to the caller afterwards.
    let mut remaining: Vec<PendingRequest> = pending_reqs.into_iter().collect();
    // Blocks until one of the handles resolves; yields its outcome and its index.
    let (outcome, ready_idx, _) = select_handles(remaining.iter().map(|p| p.handle.copy()));
    // `swap_remove` is O(1); the returned vector's order is documented as unspecified.
    let PendingRequest {
        backend, sent_req, ..
    } = remaining.swap_remove(ready_idx);
    let result = match outcome {
        Ok((resp_handle, body_handle)) => Ok(Response::from_backend_resp(
            resp_handle,
            body_handle,
            backend,
            sent_req,
        )),
        Err(e) => Err(SendError::new(backend.name(), sent_req, e)),
    };
    (result, remaining)
}