//! This library provides the [`AsyncHttpRangeReader`] type.
//!
//! It allows streaming a file over HTTP while also allowing random access. The type implements
//! both [`AsyncRead`] and [`AsyncSeek`]. This is supported through the use of range requests:
//! each individual read requests a portion of the file using an HTTP range request.
//!
//! Issuing many small reads can be relatively slow because each read needs to perform an HTTP
//! request. To alleviate this issue [`AsyncHttpRangeReader::prefetch`] is provided. Using this
//! method you can *prefetch* a number of bytes which will be streamed in the background. If a
//! read operation reads from already (pre)fetched ranges it is served from the internal cache
//! instead.
//!
//! Internally the [`AsyncHttpRangeReader`] stores a memory map which allows sparsely reading the
//! data into memory without requiring the entire file to be resident in memory.
//!
//! The primary use-case for this library is to sparsely stream a zip archive over HTTP, but it is
//! designed in a generic fashion.
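//!
//! Below is a minimal usage sketch (the URL and sizes are illustrative; the sketch assumes the
//! server supports HTTP range requests):
//!
//! ```rust
//! # use async_http_range_reader::{AsyncHttpRangeReader, CheckSupportMethod};
//! # use reqwest::header::HeaderMap;
//! # use tokio::io::{AsyncReadExt, AsyncSeekExt};
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
//! let url = reqwest::Url::parse("https://example.com/archive.zip")?;
//!
//! // Probe the server with a request for the last 8192 bytes; this also yields the file size.
//! let (mut reader, _headers) = AsyncHttpRangeReader::new(
//!     reqwest::Client::new(),
//!     url,
//!     CheckSupportMethod::NegativeRangeRequest(8192),
//!     HeaderMap::default(),
//! )
//! .await?;
//!
//! // Prefetch a larger region up front to avoid many small range requests.
//! reader.prefetch(0..4096).await;
//!
//! // Random access: seek anywhere and read; ranges that are not resident yet are fetched on
//! // demand.
//! reader.seek(std::io::SeekFrom::Start(1024)).await?;
//! let mut buf = [0u8; 512];
//! reader.read_exact(&mut buf).await?;
//! # Ok(())
//! # }
//! ```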

mod error;
mod sparse_range;

use futures::{FutureExt, Stream, StreamExt};
use http_content_range::{ContentRange, ContentRangeBytes};
use memmap2::MmapMut;
use reqwest::header::HeaderMap;
use reqwest::{Response, Url};
use sparse_range::SparseRange;
use std::{
    io::{self, ErrorKind, SeekFrom},
    ops::Range,
    pin::Pin,
    sync::Arc,
    task::{ready, Context, Poll},
};
use tokio::{
    io::{AsyncRead, AsyncSeek, ReadBuf},
    sync::watch::Sender,
    sync::{watch, Mutex},
};
use tokio_stream::wrappers::WatchStream;
use tokio_util::sync::PollSender;
use tracing::{info_span, Instrument};

pub use error::AsyncHttpRangeReaderError;

/// An `AsyncHttpRangeReader` enables reading from a file over HTTP using range requests.
///
/// See the [`crate`] level documentation for more information.
///
/// The general entrypoint is [`AsyncHttpRangeReader::new`]. Depending on the
/// [`CheckSupportMethod`], this will either call [`AsyncHttpRangeReader::initial_tail_request`] or
/// [`AsyncHttpRangeReader::initial_head_request`] to send the initial request and then
/// [`AsyncHttpRangeReader::from_tail_response`] or [`AsyncHttpRangeReader::from_head_response`] to
/// initialize the async reader. If you want to apply a caching layer, you can send the initial head
/// (or tail) request yourself with your cache headers (e.g. through the
/// [http-cache-semantics](https://docs.rs/http-cache-semantics) crate):
///
/// ```rust
/// # use url::Url;
/// # use async_http_range_reader::{AsyncHttpRangeReader, AsyncHttpRangeReaderError};
/// # use reqwest::header::HeaderMap;
/// async fn get_reader_cached(
///     url: Url,
/// ) -> Result<Option<AsyncHttpRangeReader>, AsyncHttpRangeReaderError> {
///     let etag = "63c550e8-5ae";
///     let client = reqwest::Client::new();
///     let response = client
///         .head(url.clone())
///         .header(reqwest::header::IF_NONE_MATCH, etag)
///         .send()
///         .await?;
///     if response.status() == reqwest::StatusCode::NOT_MODIFIED {
///         Ok(None)
///     } else {
///         let reader = AsyncHttpRangeReader::from_head_response(client, response, url, HeaderMap::default()).await?;
///         Ok(Some(reader))
///     }
/// }
/// ```
#[derive(Debug)]
pub struct AsyncHttpRangeReader {
    inner: Mutex<Inner>,
    len: u64,
}

#[derive(Default, Clone, Debug)]
struct StreamerState {
    resident_range: SparseRange,
    requested_ranges: Vec<Range<u64>>,
    error: Option<AsyncHttpRangeReaderError>,
}

#[derive(Debug)]
struct Inner {
    /// A read-only view on the memory mapped data. The regions of memory that contain downloaded
    /// bytes are tracked by the streamer state.
    data: &'static [u8],

    /// The current read position in the stream
    pos: u64,

    /// The range of bytes that have been requested for download
    requested_range: SparseRange,

    /// The last state received from the background task, which tracks the ranges of bytes that
    /// have actually been downloaded to `data`.
    streamer_state: StreamerState,

    /// A channel receiver that yields the latest state (downloaded ranges or an error) from the
    /// background task.
    streamer_state_rx: WatchStream<StreamerState>,

    /// A channel sender to send range requests to the background task
    request_tx: tokio::sync::mpsc::Sender<Range<u64>>,

    /// An optional object to reserve a slot in the `request_tx` sender. While a request is being
    /// sent this contains a value.
    poll_request_tx: Option<PollSender<Range<u64>>>,
}

/// For the initial request, we support either directly requesting N bytes from the end of the
/// file or, if the server doesn't support negative byte offsets, starting with a HEAD request
/// instead.
pub enum CheckSupportMethod {
    /// Perform a range request with a negative byte range. This will return the N bytes from the
    /// *end* of the file as well as the file-size. This is especially useful to also immediately
    /// get some bytes from the end of the file.
    NegativeRangeRequest(u64),

    /// Perform a head request to get the length of the file and check if the server supports range
    /// requests.
    Head,
}

fn error_for_status(response: reqwest::Response) -> reqwest_middleware::Result<Response> {
    response
        .error_for_status()
        .map_err(reqwest_middleware::Error::Reqwest)
}

impl AsyncHttpRangeReader {
    /// Construct a new `AsyncHttpRangeReader`.
    pub async fn new(
        client: impl Into<reqwest_middleware::ClientWithMiddleware>,
        url: Url,
        check_method: CheckSupportMethod,
        extra_headers: HeaderMap,
    ) -> Result<(Self, HeaderMap), AsyncHttpRangeReaderError> {
        let client = client.into();
        match check_method {
            CheckSupportMethod::NegativeRangeRequest(initial_chunk_size) => {
                let response = Self::initial_tail_request(
                    client.clone(),
                    url.clone(),
                    initial_chunk_size,
                    HeaderMap::default(),
                )
                .await?;
                let response_headers = response.headers().clone();
                let self_ = Self::from_tail_response(client, response, url, extra_headers).await?;
                Ok((self_, response_headers))
            }
            CheckSupportMethod::Head => {
                let response =
                    Self::initial_head_request(client.clone(), url.clone(), HeaderMap::default())
                        .await?;
                let response_headers = response.headers().clone();
                let self_ = Self::from_head_response(client, response, url, extra_headers).await?;
                Ok((self_, response_headers))
            }
        }
    }

    /// Send an initial range request to determine if the remote accepts range
    /// requests. This will return a number of bytes from the end of the stream. Use the
    /// `initial_chunk_size` parameter to define how many bytes should be requested from the end.
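    ///
    /// A minimal sketch of pairing this request with
    /// [`AsyncHttpRangeReader::from_tail_response`] (the chunk size and the surrounding function
    /// are illustrative):
    ///
    /// ```rust
    /// # use async_http_range_reader::{AsyncHttpRangeReader, AsyncHttpRangeReaderError};
    /// # use reqwest::header::HeaderMap;
    /// # use reqwest::Url;
    /// # async fn open_reader(url: Url) -> Result<AsyncHttpRangeReader, AsyncHttpRangeReaderError> {
    /// let client = reqwest::Client::new();
    ///
    /// // Request the trailing 8192 bytes; the Content-Range header of the response carries the
    /// // total file size.
    /// let response = AsyncHttpRangeReader::initial_tail_request(
    ///     client.clone(),
    ///     url.clone(),
    ///     8192,
    ///     HeaderMap::default(),
    /// )
    /// .await?;
    ///
    /// let reader =
    ///     AsyncHttpRangeReader::from_tail_response(client, response, url, HeaderMap::default())
    ///         .await?;
    /// # Ok(reader)
    /// # }
    /// ```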
    pub async fn initial_tail_request(
        client: impl Into<reqwest_middleware::ClientWithMiddleware>,
        url: reqwest::Url,
        initial_chunk_size: u64,
        extra_headers: HeaderMap,
    ) -> Result<Response, AsyncHttpRangeReaderError> {
        let client = client.into();
        let tail_response = client
            .get(url)
            .header(
                reqwest::header::RANGE,
                format!("bytes=-{initial_chunk_size}"),
            )
            .headers(extra_headers)
            .send()
            .await
            .and_then(error_for_status)
            .map_err(Arc::new)
            .map_err(AsyncHttpRangeReaderError::HttpError)?;
        Ok(tail_response)
    }

    /// Initialize the reader from [`AsyncHttpRangeReader::initial_tail_request`] (or a user
    /// provided response whose body contains a range of bytes from the end of the file).
    pub async fn from_tail_response(
        client: impl Into<reqwest_middleware::ClientWithMiddleware>,
        tail_request_response: Response,
        url: Url,
        extra_headers: HeaderMap,
    ) -> Result<Self, AsyncHttpRangeReaderError> {
        let client = client.into();

        // Get the size of the file from this initial request
        let content_range = ContentRange::parse(
            tail_request_response
                .headers()
                .get(reqwest::header::CONTENT_RANGE)
                .ok_or(AsyncHttpRangeReaderError::ContentRangeMissing)?
                .to_str()
                .map_err(|_err| AsyncHttpRangeReaderError::ContentRangeMissing)?,
        );
        let (start, finish, complete_length) = match content_range {
            ContentRange::Bytes(ContentRangeBytes {
                first_byte,
                last_byte,
                complete_length,
            }) => (first_byte, last_byte, complete_length),
            _ => return Err(AsyncHttpRangeReaderError::HttpRangeRequestUnsupported),
        };

        // Allocate a memory map to hold the data
        let memory_map = memmap2::MmapOptions::new()
            .len(complete_length as usize)
            .map_anon()
            .map_err(Arc::new)
            .map_err(AsyncHttpRangeReaderError::MemoryMapError)?;

        // SAFETY: Get a read-only slice to the memory. This is safe because the memory map is never
        // reallocated and we keep track of the initialized part.
        let memory_map_slice =
            unsafe { std::slice::from_raw_parts(memory_map.as_ptr(), memory_map.len()) };

        let requested_range =
            SparseRange::from_range(complete_length - (finish - start)..complete_length);

        // The channel capacity limits how many range requests can be queued before the sender
        // blocks. Two slots would suffice (one for a prefetch and one for a read through the
        // AsyncRead implementation); we use 10 to leave some slack.
        let (request_tx, request_rx) = tokio::sync::mpsc::channel(10);
        let (state_tx, state_rx) = watch::channel(StreamerState::default());
        tokio::spawn(run_streamer(
            client,
            url,
            extra_headers,
            Some((tail_request_response, start)),
            memory_map,
            state_tx,
            request_rx,
        ));

        // Configure the initial state of the streamer.
        let mut streamer_state = StreamerState::default();
        streamer_state
            .requested_ranges
            .push(complete_length - (finish - start)..complete_length);

        let reader = Self {
            len: memory_map_slice.len() as u64,
            inner: Mutex::new(Inner {
                data: memory_map_slice,
                pos: 0,
                requested_range,
                streamer_state,
                streamer_state_rx: WatchStream::new(state_rx),
                request_tx,
                poll_request_tx: None,
            }),
        };
        Ok(reader)
    }

    /// Send an initial range request to determine if the remote accepts range
    /// requests and get the content length
    pub async fn initial_head_request(
        client: impl Into<reqwest_middleware::ClientWithMiddleware>,
        url: reqwest::Url,
        extra_headers: HeaderMap,
    ) -> Result<Response, AsyncHttpRangeReaderError> {
        let client = client.into();

        // Perform a HEAD request to get the content-length.
        let head_response = client
            .head(url.clone())
            .headers(extra_headers)
            .send()
            .await
            .and_then(error_for_status)
            .map_err(Arc::new)
            .map_err(AsyncHttpRangeReaderError::HttpError)?;
        Ok(head_response)
    }

    /// Initialize the reader from [`AsyncHttpRangeReader::initial_head_request`] (or a user
    /// provided response to a HEAD request).
    pub async fn from_head_response(
        client: impl Into<reqwest_middleware::ClientWithMiddleware>,
        head_response: Response,
        url: Url,
        extra_headers: HeaderMap,
    ) -> Result<Self, AsyncHttpRangeReaderError> {
        let client = client.into();

        // Are range requests supported?
        if head_response
            .headers()
            .get(reqwest::header::ACCEPT_RANGES)
            .and_then(|h| h.to_str().ok())
            != Some("bytes")
        {
            return Err(AsyncHttpRangeReaderError::HttpRangeRequestUnsupported);
        }

        let content_length: u64 = head_response
            .headers()
            .get(reqwest::header::CONTENT_LENGTH)
            .ok_or(AsyncHttpRangeReaderError::ContentLengthMissing)?
            .to_str()
            .map_err(|_err| AsyncHttpRangeReaderError::ContentLengthMissing)?
            .parse()
            .map_err(|_err| AsyncHttpRangeReaderError::ContentLengthMissing)?;

        // Allocate a memory map to hold the data
        let memory_map = memmap2::MmapOptions::new()
            .len(content_length as _)
            .map_anon()
            .map_err(Arc::new)
            .map_err(AsyncHttpRangeReaderError::MemoryMapError)?;

        // SAFETY: Get a read-only slice to the memory. This is safe because the memory map is never
        // reallocated and we keep track of the initialized part.
        let memory_map_slice =
            unsafe { std::slice::from_raw_parts(memory_map.as_ptr(), memory_map.len()) };

        let requested_range = SparseRange::default();

        // The channel capacity limits how many range requests can be queued before the sender
        // blocks. Two slots would suffice (one for a prefetch and one for a read through the
        // AsyncRead implementation); we use 10 to leave some slack.
        let (request_tx, request_rx) = tokio::sync::mpsc::channel(10);
        let (state_tx, state_rx) = watch::channel(StreamerState::default());
        tokio::spawn(run_streamer(
            client,
            url,
            extra_headers,
            None,
            memory_map,
            state_tx,
            request_rx,
        ));

        // Configure the initial state of the streamer.
        let streamer_state = StreamerState::default();

        let reader = Self {
            len: memory_map_slice.len() as u64,
            inner: Mutex::new(Inner {
                data: memory_map_slice,
                pos: 0,
                requested_range,
                streamer_state,
                streamer_state_rx: WatchStream::new(state_rx),
                request_tx,
                poll_request_tx: None,
            }),
        };
        Ok(reader)
    }

    /// Returns the ranges that this instance actually performed HTTP requests for.
    pub async fn requested_ranges(&self) -> Vec<Range<u64>> {
        let mut inner = self.inner.lock().await;
        if let Some(Some(new_state)) = inner.streamer_state_rx.next().now_or_never() {
            inner.streamer_state = new_state;
        }
        inner.streamer_state.requested_ranges.clone()
    }

    /// Prefetches a range of bytes from the remote. When specifying a large range this can
    /// drastically reduce the number of requests sent to the server.
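    ///
    /// A minimal sketch (the offsets and the surrounding function are illustrative):
    ///
    /// ```rust
    /// # use async_http_range_reader::AsyncHttpRangeReader;
    /// # use tokio::io::{AsyncReadExt, AsyncSeekExt};
    /// # async fn read_block(mut reader: AsyncHttpRangeReader) -> std::io::Result<Vec<u8>> {
    /// // Prefetch the whole block in a single range request so the reads below are served from
    /// // the local cache instead of each triggering their own HTTP request.
    /// reader.prefetch(1024..1024 + 4096).await;
    ///
    /// reader.seek(std::io::SeekFrom::Start(1024)).await?;
    /// let mut block = vec![0u8; 4096];
    /// reader.read_exact(&mut block).await?;
    /// # Ok(block)
    /// # }
    /// ```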
    pub async fn prefetch(&mut self, bytes: Range<u64>) {
        let inner = self.inner.get_mut();

        // Ensure the range is within the file size and has a non-zero length.
        let range = bytes.start..(bytes.end.min(inner.data.len() as u64));
        if range.start >= range.end {
            return;
        }

        // Check if the range has been requested or not.
        if let Some((new_range, _)) = inner.requested_range.cover(range.clone()) {
            let _ = inner.request_tx.send(range).await;
            inner.requested_range = new_range;
        }
    }

    /// Returns the length of the stream in bytes
    #[allow(clippy::len_without_is_empty)]
    pub fn len(&self) -> u64 {
        self.len
    }
}

/// A task that downloads parts of the remote file and "sends" them to the frontend as they become
/// available.
#[tracing::instrument(name = "fetch_ranges", skip_all, fields(url))]
async fn run_streamer(
    client: reqwest_middleware::ClientWithMiddleware,
    url: Url,
    extra_headers: HeaderMap,
    initial_tail_response: Option<(Response, u64)>,
    mut memory_map: MmapMut,
    mut state_tx: Sender<StreamerState>,
    mut request_rx: tokio::sync::mpsc::Receiver<Range<u64>>,
) {
    let mut state = StreamerState::default();

    if let Some((response, response_start)) = initial_tail_response {
        // Add the initial range to the state
        state
            .requested_ranges
            .push(response_start..memory_map.len() as u64);

        // Stream the initial data in memory
        if !stream_response(
            response,
            response_start,
            &mut memory_map,
            &mut state_tx,
            &mut state,
        )
        .await
        {
            return;
        }
    }

    // Listen for any new incoming requests
    'outer: loop {
        let range = match request_rx.recv().await {
            Some(range) => range,
            None => {
                break 'outer;
            }
        };

        // Determine the range that we need to cover
        let uncovered_ranges = match state.resident_range.cover(range) {
            None => continue,
            Some((_, uncovered_ranges)) => uncovered_ranges,
        };

        // Download and stream each range.
        for range in uncovered_ranges {
            // Update the requested ranges
            state
                .requested_ranges
                .push(*range.start()..*range.end() + 1);

            // Execute the request
            let range_string = format!("bytes={}-{}", range.start(), range.end());
            let span = info_span!("fetch_range", range = range_string.as_str());
            let response = match client
                .get(url.clone())
                .header(reqwest::header::RANGE, range_string)
                .headers(extra_headers.clone())
                .send()
                .instrument(span)
                .await
                .and_then(error_for_status)
                .map_err(|e| std::io::Error::new(ErrorKind::Other, e))
            {
                Err(e) => {
                    state.error = Some(e.into());
                    let _ = state_tx.send(state);
                    break 'outer;
                }
                Ok(response) => response,
            };

            // If the server returns a successful, but non-206 response (e.g., 200), then it
            // doesn't support range requests (even if the `Accept-Ranges` header is set).
            if response.status() != reqwest::StatusCode::PARTIAL_CONTENT {
                state.error = Some(AsyncHttpRangeReaderError::HttpRangeRequestUnsupported);
                let _ = state_tx.send(state);
                break 'outer;
            }

            if !stream_response(
                response,
                *range.start(),
                &mut memory_map,
                &mut state_tx,
                &mut state,
            )
            .await
            {
                break 'outer;
            }
        }
    }
}

/// Streams the data from the specified response into the memory map, updating progress along the
/// way. Returns `true` if everything went fine, `false` if anything went wrong. The error state, if
/// any, is stored in `state_tx` so the "frontend" can consume it.
async fn stream_response(
    tail_request_response: Response,
    mut offset: u64,
    memory_map: &mut MmapMut,
    state_tx: &mut Sender<StreamerState>,
    state: &mut StreamerState,
) -> bool {
    let mut byte_stream = tail_request_response.bytes_stream();
    while let Some(bytes) = byte_stream.next().await {
        let bytes = match bytes {
            Err(e) => {
                state.error = Some(e.into());
                let _ = state_tx.send(state.clone());
                return false;
            }
            Ok(bytes) => bytes,
        };

        // Determine the range of these bytes in the complete file
        let byte_range = offset..offset + bytes.len() as u64;

        // Update the offset
        offset = byte_range.end;

        // Copy the data from the stream to memory
        memory_map[byte_range.start as usize..byte_range.end as usize]
            .copy_from_slice(bytes.as_ref());

        // Update the range of bytes that have been downloaded
        state.resident_range.update(byte_range);

        // Notify anyone that's listening that we have downloaded some extra data
        if state_tx.send(state.clone()).is_err() {
            // If we failed to send the state it means there is no receiver. In that case we
            // should just exit.
            return false;
        }
    }

    true
}

impl AsyncSeek for AsyncHttpRangeReader {
    fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> io::Result<()> {
        let me = self.get_mut();
        let inner = me.inner.get_mut();

        inner.pos = match position {
            SeekFrom::Start(pos) => pos,
            SeekFrom::End(relative) => (inner.data.len() as i64).saturating_add(relative) as u64,
            SeekFrom::Current(relative) => (inner.pos as i64).saturating_add(relative) as u64,
        };

        Ok(())
    }

    fn poll_complete(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        let inner = self.inner.get_mut();
        Poll::Ready(Ok(inner.pos))
    }
}

impl AsyncRead for AsyncHttpRangeReader {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        let me = self.get_mut();
        let inner = me.inner.get_mut();

        // If a previous error occurred we return that.
        if let Some(e) = inner.streamer_state.error.as_ref() {
            return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e.clone())));
        }

        // Determine the range to be fetched
        let range = inner.pos..(inner.pos + buf.remaining() as u64).min(inner.data.len() as u64);
        if range.start >= range.end {
            return Poll::Ready(Ok(()));
        }

        // Ensure we requested the required bytes
        while !inner.requested_range.is_covered(range.clone()) {
            // If there is an active range request wait for it to complete
            if let Some(mut poll) = inner.poll_request_tx.take() {
                match poll.poll_reserve(cx) {
                    Poll::Ready(_) => {
                        let _ = poll.send_item(range.clone());
                        inner.requested_range.update(range.clone());
                        break;
                    }
                    Poll::Pending => {
                        inner.poll_request_tx = Some(poll);
                        return Poll::Pending;
                    }
                }
            }

            // Request the range
            inner.poll_request_tx = Some(PollSender::new(inner.request_tx.clone()));
        }

        // If a slot reservation is still pending but no request is needed anymore, abort it.
        if let Some(mut poll) = inner.poll_request_tx.take() {
            poll.abort_send();
        }

        loop {
            // Is the range already available?
            if inner
                .streamer_state
                .resident_range
                .is_covered(range.clone())
            {
                let len = (range.end - range.start) as usize;
                buf.initialize_unfilled_to(len)
                    .copy_from_slice(&inner.data[range.start as usize..range.end as usize]);
                buf.advance(len);
                inner.pos += len as u64;
                return Poll::Ready(Ok(()));
            }

            // Otherwise wait for new data to come in
            match ready!(Pin::new(&mut inner.streamer_state_rx).poll_next(cx)) {
                None => unreachable!(),
                Some(state) => {
                    inner.streamer_state = state;
                    if let Some(e) = inner.streamer_state.error.as_ref() {
                        return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, e.clone())));
                    }
                }
            }
        }
    }
}

#[cfg(test)]
mod static_directory_server;

#[cfg(test)]
mod test {
    use super::*;
    use crate::static_directory_server::StaticDirectoryServer;
    use assert_matches::assert_matches;
    use async_zip::tokio::read::seek::ZipFileReader;
    use futures::AsyncReadExt;
    use reqwest::{Client, StatusCode};
    use rstest::*;
    use std::path::Path;
    use tokio::io::AsyncReadExt as _;
    use tokio_util::compat::TokioAsyncReadCompatExt;

    #[rstest]
    #[case(CheckSupportMethod::Head)]
    #[case(CheckSupportMethod::NegativeRangeRequest(8192))]
    #[tokio::test]
    async fn async_range_reader_zip(#[case] check_method: CheckSupportMethod) {
        // Spawn a static file server
        let path = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("test-data");
        let server = StaticDirectoryServer::new(&path)
            .await
            .expect("could not initialize server");

        // check that file is there and has the right size
        let filepath = path.join("andes-1.8.3-pyhd8ed1ab_0.conda");
        assert!(
            filepath.exists(),
            "The conda package is not there yet. Did you run `git lfs pull`?"
        );
        let file_size = std::fs::metadata(&filepath).unwrap().len();
        assert_eq!(
            file_size, 2_463_995,
            "The conda package is not there yet. Did you run `git lfs pull`?"
        );

        // Construct an AsyncRangeReader
        let (mut range, _) = AsyncHttpRangeReader::new(
            Client::new(),
            server.url().join("andes-1.8.3-pyhd8ed1ab_0.conda").unwrap(),
            check_method,
            HeaderMap::default(),
        )
        .await
        .expect("Could not download range - did you run `git lfs pull`?");

        // Make sure we have read the last couple of bytes
        range.prefetch(range.len() - 8192..range.len()).await;

        assert_eq!(range.len(), file_size);

        let mut reader = ZipFileReader::new(range.compat()).await.unwrap();

        assert_eq!(
            reader
                .file()
                .entries()
                .iter()
                .map(|e| e.entry().filename().as_str().unwrap_or(""))
                .collect::<Vec<_>>(),
            vec![
                "metadata.json",
                "info-andes-1.8.3-pyhd8ed1ab_0.tar.zst",
                "pkg-andes-1.8.3-pyhd8ed1ab_0.tar.zst",
            ]
        );

        // Get the number of performed requests so far
        let request_ranges = reader.inner_mut().get_mut().requested_ranges().await;
        assert_eq!(request_ranges.len(), 1);
        assert_eq!(
            request_ranges[0].end - request_ranges[0].start,
            8192,
            "first request should be the size of the initial chunk size"
        );
        assert_eq!(
            request_ranges[0].end, file_size,
            "first request should be at the end"
        );

        // Prefetch the data for the metadata.json file
        let entry = reader.file().entries().first().unwrap();
        let offset = entry.header_offset();
        // Get the size of the entry plus the local file header (30 bytes) plus the size of the
        // filename. We should really also include the bytes of the extra fields, but we don't have
        // that information.
        let size =
            entry.entry().compressed_size() + 30 + entry.entry().filename().as_bytes().len() as u64;

        // The zip reader uses a BufReader which reads in chunks of 8192 bytes. To ensure we
        // prefetch enough data we round the size up to the nearest multiple of the buffer size.
        let buffer_size = 8192;
        let size = ((size + buffer_size - 1) / buffer_size) * buffer_size;

        // Fetch the bytes from the zip archive that contain the requested file.
        reader
            .inner_mut()
            .get_mut()
            .prefetch(offset..offset + size as u64)
            .await;

        // Read the contents of the metadata.json file
        let mut contents = String::new();
        reader
            .reader_with_entry(0)
            .await
            .unwrap()
            .read_to_string(&mut contents)
            .await
            .unwrap();

        // Get the number of performed requests
        let request_ranges = reader.inner_mut().get_mut().requested_ranges().await;

        assert_eq!(contents, r#"{"conda_pkg_format_version": 2}"#);
        assert_eq!(request_ranges.len(), 2);
        assert_eq!(
            request_ranges[1],
            0..size,
            "expected only two range requests"
        );
    }

    #[rstest]
    #[case(CheckSupportMethod::Head)]
    #[case(CheckSupportMethod::NegativeRangeRequest(8192))]
    #[tokio::test]
    async fn async_range_reader(#[case] check_method: CheckSupportMethod) {
        // Spawn a static file server
        let path = Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("test-data");
        let server = StaticDirectoryServer::new(&path)
            .await
            .expect("could not initialize server");

        // Construct an AsyncRangeReader
        let (mut range, _) = AsyncHttpRangeReader::new(
            Client::new(),
            server.url().join("andes-1.8.3-pyhd8ed1ab_0.conda").unwrap(),
            check_method,
            HeaderMap::default(),
        )
        .await
        .expect("bla");

        // Also open a simple file reader
        let mut file = tokio::fs::File::open(path.join("andes-1.8.3-pyhd8ed1ab_0.conda"))
            .await
            .unwrap();

        // Read until the end and make sure that the contents matches
        let mut range_read = vec![0; 64 * 1024];
        let mut file_read = vec![0; 64 * 1024];
        loop {
            // Read with the async reader
            let range_read_bytes = range.read(&mut range_read).await.unwrap();

            // Read directly from the file
            let file_read_bytes = file
                .read_exact(&mut file_read[0..range_read_bytes])
                .await
                .unwrap();

            assert_eq!(range_read_bytes, file_read_bytes);
            assert_eq!(
                range_read[0..range_read_bytes],
                file_read[0..file_read_bytes]
            );

            if file_read_bytes == 0 && range_read_bytes == 0 {
                break;
            }
        }
    }

    #[tokio::test]
    async fn test_not_found() {
        let server = StaticDirectoryServer::new(Path::new(env!("CARGO_MANIFEST_DIR")))
            .await
            .expect("could not initialize server");
        let err = AsyncHttpRangeReader::new(
            Client::new(),
            server.url().join("not-found").unwrap(),
            CheckSupportMethod::Head,
            HeaderMap::default(),
        )
        .await
        .expect_err("expected an error");

        assert_matches!(
            err, AsyncHttpRangeReaderError::HttpError(err) if err.status() == Some(StatusCode::NOT_FOUND)
        );
    }
}