1use std::future::Future;
2use std::io;
3use std::path::Path;
4use std::pin::Pin;
5use std::sync::Arc;
6use std::task::{Context, Poll};
7
8use futures::{FutureExt, TryStreamExt};
9use tokio::io::{AsyncRead, AsyncSeekExt, ReadBuf};
10use tokio::sync::Semaphore;
11use tokio_util::compat::FuturesAsyncReadCompatExt;
12use tracing::{Instrument, info_span, instrument, warn};
13use url::Url;
14
15use uv_cache::{ArchiveId, CacheBucket, CacheEntry, WheelCache};
16use uv_cache_info::{CacheInfo, Timestamp};
17use uv_client::{
18 CacheControl, CachedClientError, Connectivity, DataWithCachePolicy, RegistryClient,
19};
20use uv_distribution_filename::{SourceDistExtension, WheelFilename};
21use uv_distribution_types::{
22 BuildInfo, BuildableSource, BuiltDist, Dist, DistRef, File, HashPolicy, Hashed, IndexUrl,
23 InstalledDist, Name, SourceDist, ToUrlError,
24};
25use uv_extract::hash::Hasher;
26use uv_fs::write_atomic;
27use uv_install_wheel::validate_and_heal_record;
28use uv_platform_tags::Tags;
29use uv_pypi_types::{HashDigest, HashDigests, PyProjectToml};
30use uv_redacted::DisplaySafeUrl;
31use uv_types::{BuildContext, BuildStack};
32use uv_warnings::warn_user_once;
33
34use crate::archive::Archive;
35use uv_python::PythonVariant;
36
37use crate::error::PythonVersion;
38use crate::metadata::{ArchiveMetadata, Metadata};
39use crate::source::SourceDistributionBuilder;
40use crate::{Error, LocalWheel, Reporter, RequiresDist};
41
/// A database of distributions, providing access to local wheels whether they
/// originate from a registry, a direct URL, a local path, or a source build.
pub struct DistributionDatabase<'a, Context: BuildContext> {
    /// Shared build context: cache, index locations, capabilities, etc.
    build_context: &'a Context,
    /// Builder used to download and build source distributions.
    builder: SourceDistributionBuilder<'a, Context>,
    /// Registry client wrapped in a semaphore to bound concurrent downloads.
    client: ManagedClient<'a>,
    /// Optional reporter for download progress events.
    reporter: Option<Arc<dyn Reporter>>,
}
60
61impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
62 pub fn new(
63 client: &'a RegistryClient,
64 build_context: &'a Context,
65 downloads_semaphore: Arc<Semaphore>,
66 ) -> Self {
67 Self {
68 build_context,
69 builder: SourceDistributionBuilder::new(build_context),
70 client: ManagedClient::new(client, downloads_semaphore),
71 reporter: None,
72 }
73 }
74
75 #[must_use]
77 pub fn with_build_stack(self, build_stack: &'a BuildStack) -> Self {
78 Self {
79 builder: self.builder.with_build_stack(build_stack),
80 ..self
81 }
82 }
83
84 #[must_use]
86 pub fn with_reporter(self, reporter: Arc<dyn Reporter>) -> Self {
87 Self {
88 builder: self.builder.with_reporter(reporter.clone()),
89 reporter: Some(reporter),
90 ..self
91 }
92 }
93
94 fn handle_response_errors(&self, err: reqwest::Error) -> io::Error {
96 if err.is_timeout() {
97 io::Error::new(
99 io::ErrorKind::TimedOut,
100 format!(
101 "Failed to download distribution due to network timeout. Try increasing UV_HTTP_TIMEOUT (current value: {}s).",
102 self.client.unmanaged.read_timeout().as_secs()
103 ),
104 )
105 } else {
106 io::Error::other(err)
107 }
108 }
109
110 #[instrument(skip_all, fields(%dist))]
117 pub async fn get_or_build_wheel(
118 &self,
119 dist: &Dist,
120 tags: &Tags,
121 hashes: HashPolicy<'_>,
122 ) -> Result<LocalWheel, Error> {
123 match dist {
124 Dist::Built(built) => self.get_wheel(built, hashes).await,
125 Dist::Source(source) => self.build_wheel(source, tags, hashes).await,
126 }
127 }
128
129 #[instrument(skip_all, fields(%dist))]
135 pub async fn get_installed_metadata(
136 &self,
137 dist: &InstalledDist,
138 ) -> Result<ArchiveMetadata, Error> {
139 if let Some(metadata) = self
141 .build_context
142 .dependency_metadata()
143 .get(dist.name(), Some(dist.version()))
144 {
145 return Ok(ArchiveMetadata::from_metadata23(metadata.clone()));
146 }
147
148 let metadata = dist
149 .read_metadata()
150 .map_err(|err| Error::ReadInstalled(Box::new(dist.clone()), err))?;
151
152 Ok(ArchiveMetadata::from_metadata23(metadata.clone()))
153 }
154
155 #[instrument(skip_all, fields(%dist))]
161 pub async fn get_or_build_wheel_metadata(
162 &self,
163 dist: &Dist,
164 hashes: HashPolicy<'_>,
165 ) -> Result<ArchiveMetadata, Error> {
166 match dist {
167 Dist::Built(built) => self.get_wheel_metadata(built, hashes).await,
168 Dist::Source(source) => {
169 self.build_wheel_metadata(&BuildableSource::Dist(source), hashes)
170 .await
171 }
172 }
173 }
174
    /// Fetch a built wheel into the cache and return it as a [`LocalWheel`].
    ///
    /// Registry and direct-URL wheels are streamed straight into the cache;
    /// when the server can't support streaming extraction, they are downloaded
    /// to disk and unzipped instead. `file://` and path wheels are loaded from
    /// the local filesystem.
    async fn get_wheel(
        &self,
        dist: &BuiltDist,
        hashes: HashPolicy<'_>,
    ) -> Result<LocalWheel, Error> {
        match dist {
            BuiltDist::Registry(wheels) => {
                let wheel = wheels.best_wheel();
                // Resolve the artifact to fetch: URL, wire format (plain
                // `.whl` vs. zstd tarball), and expected size, if known.
                let WheelTarget {
                    url,
                    extension,
                    size,
                } = WheelTarget::try_from(&*wheel.file)?;

                let wheel_entry = self.build_context.cache().entry(
                    CacheBucket::Wheels,
                    WheelCache::Index(&wheel.index).wheel_dir(wheel.name().as_ref()),
                    wheel.filename.cache_key(),
                );

                // `file://` registries: skip HTTP entirely and load from disk.
                if url.scheme() == "file" {
                    let path = url
                        .to_file_path()
                        .map_err(|()| Error::NonFileUrl(url.clone()))?;
                    return self
                        .load_wheel(
                            &path,
                            &wheel.filename,
                            WheelExtension::Whl,
                            wheel_entry,
                            dist,
                            hashes,
                        )
                        .await;
                }

                // Prefer streaming extraction; fall back to a full download if
                // the server can't support it.
                match self
                    .stream_wheel(
                        url.clone(),
                        dist.index(),
                        &wheel.filename,
                        extension,
                        size,
                        &wheel_entry,
                        dist,
                        hashes,
                    )
                    .await
                {
                    Ok(archive) => Ok(LocalWheel {
                        dist: Dist::Built(dist.clone()),
                        archive: self
                            .build_context
                            .cache()
                            .archive(&archive.id)
                            .into_boxed_path(),
                        hashes: archive.hashes,
                        filename: wheel.filename.clone(),
                        cache: CacheInfo::default(),
                        build: None,
                    }),
                    Err(Error::Extract(name, err)) => {
                        // Only streaming-specific failures trigger the
                        // download fallback; any other extraction error is
                        // propagated as-is.
                        if err.is_http_streaming_unsupported() {
                            warn!(
                                "Streaming unsupported for {dist}; downloading wheel to disk ({err})"
                            );
                        } else if err.is_http_streaming_failed() {
                            warn!("Streaming failed for {dist}; downloading wheel to disk ({err})");
                        } else {
                            return Err(Error::Extract(name, err));
                        }

                        // If the request failed because streaming is unsupported,
                        // download the wheel to disk and extract from there.
                        let archive = self
                            .download_wheel(
                                url,
                                dist.index(),
                                &wheel.filename,
                                extension,
                                size,
                                &wheel_entry,
                                dist,
                                hashes,
                            )
                            .await?;

                        Ok(LocalWheel {
                            dist: Dist::Built(dist.clone()),
                            archive: self
                                .build_context
                                .cache()
                                .archive(&archive.id)
                                .into_boxed_path(),
                            hashes: archive.hashes,
                            filename: wheel.filename.clone(),
                            cache: CacheInfo::default(),
                            build: None,
                        })
                    }
                    Err(err) => Err(err),
                }
            }

            BuiltDist::DirectUrl(wheel) => {
                // Direct URLs are always plain `.whl` (no zstd variant, no
                // index to consult for cache-control, no known size).
                let wheel_entry = self.build_context.cache().entry(
                    CacheBucket::Wheels,
                    WheelCache::Url(&wheel.url).wheel_dir(wheel.name().as_ref()),
                    wheel.filename.cache_key(),
                );

                match self
                    .stream_wheel(
                        wheel.url.raw().clone(),
                        None,
                        &wheel.filename,
                        WheelExtension::Whl,
                        None,
                        &wheel_entry,
                        dist,
                        hashes,
                    )
                    .await
                {
                    Ok(archive) => Ok(LocalWheel {
                        dist: Dist::Built(dist.clone()),
                        archive: self
                            .build_context
                            .cache()
                            .archive(&archive.id)
                            .into_boxed_path(),
                        hashes: archive.hashes,
                        filename: wheel.filename.clone(),
                        cache: CacheInfo::default(),
                        build: None,
                    }),
                    Err(Error::Extract(name, err)) => {
                        // Same streaming-failure fallback as the registry arm.
                        if err.is_http_streaming_unsupported() {
                            warn!(
                                "Streaming unsupported for {dist}; downloading wheel to disk ({err})"
                            );
                        } else if err.is_http_streaming_failed() {
                            warn!("Streaming failed for {dist}; downloading wheel to disk ({err})");
                        } else {
                            return Err(Error::Extract(name, err));
                        }

                        let archive = self
                            .download_wheel(
                                wheel.url.raw().clone(),
                                None,
                                &wheel.filename,
                                WheelExtension::Whl,
                                None,
                                &wheel_entry,
                                dist,
                                hashes,
                            )
                            .await?;
                        Ok(LocalWheel {
                            dist: Dist::Built(dist.clone()),
                            archive: self
                                .build_context
                                .cache()
                                .archive(&archive.id)
                                .into_boxed_path(),
                            hashes: archive.hashes,
                            filename: wheel.filename.clone(),
                            cache: CacheInfo::default(),
                            build: None,
                        })
                    }
                    Err(err) => Err(err),
                }
            }

            BuiltDist::Path(wheel) => {
                // Local wheels: unzip directly from the install path.
                let cache_entry = self.build_context.cache().entry(
                    CacheBucket::Wheels,
                    WheelCache::Url(&wheel.url).wheel_dir(wheel.name().as_ref()),
                    wheel.filename.cache_key(),
                );

                self.load_wheel(
                    &wheel.install_path,
                    &wheel.filename,
                    WheelExtension::Whl,
                    cache_entry,
                    dist,
                    hashes,
                )
                .await
            }
        }
    }
381
    /// Build a wheel from a source distribution and return it as a
    /// [`LocalWheel`], reusing a previously built (cached) wheel if one is
    /// linked in the cache.
    async fn build_wheel(
        &self,
        dist: &SourceDist,
        tags: &Tags,
        hashes: HashPolicy<'_>,
    ) -> Result<LocalWheel, Error> {
        // Warn (once per message) on non-PEP 625 source distribution formats.
        if let Some(extension) = dist.extension()
            && !matches!(
                extension,
                SourceDistExtension::TarGz | SourceDistExtension::Zip
            )
        {
            if matches!(dist, SourceDist::Registry(_)) {
                warn_user_once!(
                    "{dist} uses a legacy source distribution format ('.{extension}') that is not compliant with PEP 625. A future version of uv will reject this source distribution. Consider upgrading to a newer version of {package}",
                    package = dist.name(),
                );
            } else {
                warn_user_once!(
                    "{dist} is not a standards-compliant source distribution: expected '.tar.gz' but found '.{extension}'. A future version of uv will reject source distributions that do not meet the requirements specified in PEP 625",
                );
            }
        }

        let built_wheel = self
            .builder
            .download_and_build(&BuildableSource::Dist(dist), tags, hashes, &self.client)
            .boxed_local()
            .await?;

        // If the build produced a wheel that isn't usable on the requested
        // platform, report a platform-specific error (cross vs. host).
        if !built_wheel.filename.is_compatible(tags) {
            return if tags.is_cross() {
                Err(Error::BuiltWheelIncompatibleTargetPlatform {
                    filename: built_wheel.filename,
                    python_platform: tags.python_platform().clone(),
                    python_version: PythonVersion {
                        version: tags.python_version(),
                        variant: if tags.is_freethreaded() {
                            PythonVariant::Freethreaded
                        } else {
                            PythonVariant::Default
                        },
                    },
                })
            } else {
                Err(Error::BuiltWheelIncompatibleHostPlatform {
                    filename: built_wheel.filename,
                    python_platform: tags.python_platform().clone(),
                    python_version: PythonVersion {
                        version: tags.python_version(),
                        variant: if tags.is_freethreaded() {
                            PythonVariant::Freethreaded
                        } else {
                            PythonVariant::Default
                        },
                    },
                })
            };
        }

        // On Windows, take a file lock next to the target link to avoid
        // concurrent processes racing on the same cache entry.
        #[cfg(windows)]
        let _lock = {
            let lock_entry = CacheEntry::new(
                built_wheel.target.parent().unwrap(),
                format!(
                    "{}.lock",
                    built_wheel.target.file_name().unwrap().to_str().unwrap()
                ),
            );
            lock_entry.lock().await.map_err(Error::CacheLock)?
        };

        // If a previous build is already linked in the cache, reuse it.
        // `NotFound` means no link yet; any other error is a real failure.
        match self.build_context.cache().resolve_link(&built_wheel.target) {
            Ok(archive) => {
                return Ok(LocalWheel {
                    dist: Dist::Source(dist.clone()),
                    archive: archive.into_boxed_path(),
                    filename: built_wheel.filename,
                    hashes: built_wheel.hashes,
                    cache: built_wheel.cache_info,
                    build: Some(built_wheel.build_info),
                });
            }
            Err(err) if err.kind() == io::ErrorKind::NotFound => {}
            Err(err) => return Err(Error::CacheRead(err)),
        }

        // Otherwise, unzip the freshly built wheel into the cache.
        let id = self
            .unzip_wheel(
                &built_wheel.path,
                &built_wheel.target,
                DistRef::Source(dist),
            )
            .await?;

        Ok(LocalWheel {
            dist: Dist::Source(dist.clone()),
            archive: self.build_context.cache().archive(&id).into_boxed_path(),
            hashes: built_wheel.hashes,
            filename: built_wheel.filename,
            cache: built_wheel.cache_info,
            build: Some(built_wheel.build_info),
        })
    }
510
    /// Fetch metadata for a built wheel.
    ///
    /// When the hash policy requires generating digests, the full wheel is
    /// fetched (so its hashes can be computed); otherwise the metadata is
    /// fetched remotely, falling back to a full wheel download only if the
    /// server can't support streaming the metadata out of the archive.
    async fn get_wheel_metadata(
        &self,
        dist: &BuiltDist,
        hashes: HashPolicy<'_>,
    ) -> Result<ArchiveMetadata, Error> {
        // Hash generation requires the archive itself, so fetch the wheel;
        // a user-provided metadata override still wins for the metadata part.
        if hashes.is_generate(dist) {
            let wheel = self.get_wheel(dist, hashes).await?;
            let metadata = if let Some(metadata) = self
                .build_context
                .dependency_metadata()
                .get(dist.name(), Some(dist.version()))
            {
                metadata.clone()
            } else {
                wheel.metadata()?
            };
            let hashes = wheel.hashes;
            return Ok(ArchiveMetadata {
                metadata: Metadata::from_metadata23(metadata),
                hashes,
            });
        }

        // No hashing needed: a user-provided override short-circuits entirely.
        if let Some(metadata) = self
            .build_context
            .dependency_metadata()
            .get(dist.name(), Some(dist.version()))
        {
            return Ok(ArchiveMetadata::from_metadata23(metadata.clone()));
        }

        // Fetch the metadata remotely (e.g., via range requests or the
        // index's metadata endpoint), bounded by the download semaphore.
        let result = self
            .client
            .managed(|client| {
                client
                    .wheel_metadata(dist, self.build_context.capabilities())
                    .boxed_local()
            })
            .await;

        match result {
            Ok(metadata) => {
                Ok(ArchiveMetadata::from_metadata23(metadata))
            }
            Err(err) if err.is_http_streaming_unsupported() => {
                warn!(
                    "Streaming unsupported when fetching metadata for {dist}; downloading wheel directly ({err})"
                );

                // Fall back to fetching the entire wheel and reading its
                // metadata from the archive.
                let wheel = self.get_wheel(dist, hashes).await?;
                let metadata = wheel.metadata()?;
                let hashes = wheel.hashes;
                Ok(ArchiveMetadata {
                    metadata: Metadata::from_metadata23(metadata),
                    hashes,
                })
            }
            Err(err) => Err(err.into()),
        }
    }
594
    /// Build (or fetch) the metadata for a buildable source.
    ///
    /// A user-provided dependency-metadata override short-circuits the build,
    /// but the source's revision is still resolved so downstream consumers
    /// (e.g., lockfile generation) have a concrete revision to refer to.
    pub async fn build_wheel_metadata(
        &self,
        source: &BuildableSource<'_>,
        hashes: HashPolicy<'_>,
    ) -> Result<ArchiveMetadata, Error> {
        if let Some(dist) = source.as_dist() {
            if let Some(metadata) = self
                .build_context
                .dependency_metadata()
                .get(dist.name(), dist.version())
            {
                // Resolve the revision even though the metadata is overridden.
                self.builder.resolve_revision(source, &self.client).await?;

                return Ok(ArchiveMetadata::from_metadata23(metadata.clone()));
            }
        }

        let metadata = self
            .builder
            .download_and_build_metadata(source, hashes, &self.client)
            .boxed_local()
            .await?;

        Ok(metadata)
    }
627
628 pub async fn requires_dist(
630 &self,
631 path: &Path,
632 pyproject_toml: &PyProjectToml,
633 ) -> Result<Option<RequiresDist>, Error> {
634 self.builder
635 .source_tree_requires_dist(
636 path,
637 pyproject_toml,
638 self.client.unmanaged.credentials_cache(),
639 )
640 .await
641 }
642
    /// Stream a wheel from a URL, unzipping it into the cache as it arrives,
    /// while (optionally) hashing the raw bytes.
    ///
    /// The result is cached under an `.http` entry via the cached client; if
    /// the cached archive lacks required digests or its files are missing from
    /// the cache, the download is retried with the cache bypassed.
    async fn stream_wheel(
        &self,
        url: DisplaySafeUrl,
        index: Option<&IndexUrl>,
        filename: &WheelFilename,
        extension: WheelExtension,
        size: Option<u64>,
        wheel_entry: &CacheEntry,
        dist: &BuiltDist,
        hashes: HashPolicy<'_>,
    ) -> Result<Archive, Error> {
        // On Windows, serialize concurrent writers of the same wheel entry.
        #[cfg(windows)]
        let _lock = {
            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
            lock_entry.lock().await.map_err(Error::CacheLock)?
        };

        let http_entry = wheel_entry.with_file(format!("{}.http", filename.cache_key()));

        let query_url = &url.clone();

        // Callback invoked by the cached client on a fresh (non-cached)
        // response: hash + unzip the body directly into a temp dir, then
        // persist it into the cache.
        let download = |response: reqwest::Response| {
            async {
                // Prefer the caller-provided size; fall back to Content-Length.
                let size = size.or_else(|| content_length(&response));

                let progress = self
                    .reporter
                    .as_ref()
                    .map(|reporter| (reporter, reporter.on_download_start(dist.name(), size)));

                let reader = response
                    .bytes_stream()
                    .map_err(|err| self.handle_response_errors(err))
                    .into_async_read();

                // Tee the stream through the requested hashers as we extract.
                let algorithms = hashes.algorithms();
                let mut hashers = algorithms.into_iter().map(Hasher::from).collect::<Vec<_>>();
                let mut hasher = uv_extract::hash::HashReader::new(reader.compat(), &mut hashers);

                // Extract into a temp dir inside the cache root so the final
                // persist is a cheap same-filesystem rename.
                let temp_dir = tempfile::tempdir_in(self.build_context.cache().root())
                    .map_err(Error::CacheWrite)?;

                let files = match progress {
                    Some((reporter, progress)) => {
                        let mut reader = ProgressReader::new(&mut hasher, progress, &**reporter);
                        match extension {
                            WheelExtension::Whl => {
                                uv_extract::stream::unzip(query_url, &mut reader, temp_dir.path())
                                    .await
                                    .map_err(|err| Error::Extract(filename.to_string(), err))?
                            }
                            WheelExtension::WhlZst => {
                                uv_extract::stream::untar_zst(&mut reader, temp_dir.path())
                                    .await
                                    .map_err(|err| Error::Extract(filename.to_string(), err))?
                            }
                        }
                    }
                    None => match extension {
                        WheelExtension::Whl => {
                            uv_extract::stream::unzip(query_url, &mut hasher, temp_dir.path())
                                .await
                                .map_err(|err| Error::Extract(filename.to_string(), err))?
                        }
                        WheelExtension::WhlZst => {
                            uv_extract::stream::untar_zst(&mut hasher, temp_dir.path())
                                .await
                                .map_err(|err| Error::Extract(filename.to_string(), err))?
                        }
                    },
                };
                // Drain any trailing bytes so the digests cover the full body.
                if !hashes.is_none() {
                    hasher.finish().await.map_err(Error::HashExhaustion)?;
                }

                validate_and_heal_record(temp_dir.path(), files.iter(), dist)
                    .map_err(Error::InstallWheelError)?;

                // NOTE(review): `persist` is a write; mapping its error to
                // `Error::CacheRead` looks inconsistent with `load_wheel` /
                // `unzip_wheel`, which use `CacheWrite` — confirm intent.
                let id = self
                    .build_context
                    .cache()
                    .persist(temp_dir.keep(), wheel_entry.path())
                    .await
                    .map_err(Error::CacheRead)?;

                if let Some((reporter, progress)) = progress {
                    reporter.on_download_complete(dist.name(), progress);
                }

                Ok(Archive::new(
                    id,
                    hashers.into_iter().map(HashDigest::from).collect(),
                    filename.clone(),
                ))
            }
            .instrument(info_span!("wheel", wheel = %dist))
        };

        let req = self.request(url.clone())?;

        // Cache-control: an index-level override wins; otherwise derive it
        // from the cache entry's freshness. Offline mode always allows stale.
        let cache_control = match self.client.unmanaged.connectivity() {
            Connectivity::Online => {
                if let Some(header) = index.and_then(|index| {
                    self.build_context
                        .locations()
                        .artifact_cache_control_for(index)
                }) {
                    CacheControl::Override(header)
                } else {
                    CacheControl::from(
                        self.build_context
                            .cache()
                            .freshness(&http_entry, Some(&filename.name), None)
                            .map_err(Error::CacheRead)?,
                    )
                }
            }
            Connectivity::Offline => CacheControl::AllowStale,
        };

        let archive = self
            .client
            .managed(|client| {
                client.cached_client().get_serde_with_retry(
                    req,
                    &http_entry,
                    cache_control.clone(),
                    download,
                )
            })
            .await
            .map_err(|err| match err {
                CachedClientError::Callback { err, .. } => err,
                CachedClientError::Client(err) => Error::Client(err),
            })?;

        // Discard the cached archive if it lacks the required digests or its
        // unpacked files have been evicted from the cache.
        let archive = Some(archive)
            .filter(|archive| archive.has_digests(hashes))
            .filter(|archive| archive.exists(self.build_context.cache()));

        // Re-download with the cache bypassed if the cached result is unusable.
        let archive = if let Some(archive) = archive {
            archive
        } else {
            self.client
                .managed(async |client| {
                    client
                        .cached_client()
                        .skip_cache_with_retry(
                            self.request(url)?,
                            &http_entry,
                            cache_control,
                            download,
                        )
                        .await
                        .map_err(|err| match err {
                            CachedClientError::Callback { err, .. } => err,
                            CachedClientError::Client(err) => Error::Client(err),
                        })
                })
                .await?
        };

        Ok(archive)
    }
819
    /// Download a wheel to a temporary file on disk, then unzip it into the
    /// cache. Used as the fallback when streaming extraction is unsupported
    /// (e.g., servers that can't satisfy the requests streaming requires).
    async fn download_wheel(
        &self,
        url: DisplaySafeUrl,
        index: Option<&IndexUrl>,
        filename: &WheelFilename,
        extension: WheelExtension,
        size: Option<u64>,
        wheel_entry: &CacheEntry,
        dist: &BuiltDist,
        hashes: HashPolicy<'_>,
    ) -> Result<Archive, Error> {
        // On Windows, serialize concurrent writers of the same wheel entry.
        #[cfg(windows)]
        let _lock = {
            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
            lock_entry.lock().await.map_err(Error::CacheLock)?
        };

        let http_entry = wheel_entry.with_file(format!("{}.http", filename.cache_key()));

        let query_url = &url.clone();

        // Callback invoked by the cached client on a fresh response: buffer
        // the body to a temp file, then extract (and optionally hash) it.
        let download = |response: reqwest::Response| {
            async {
                let size = size.or_else(|| content_length(&response));

                let progress = self
                    .reporter
                    .as_ref()
                    .map(|reporter| (reporter, reporter.on_download_start(dist.name(), size)));

                let reader = response
                    .bytes_stream()
                    .map_err(|err| self.handle_response_errors(err))
                    .into_async_read();

                // Write the body to an anonymous temp file inside the cache
                // root (same filesystem as the final destination).
                let temp_file = tempfile::tempfile_in(self.build_context.cache().root())
                    .map_err(Error::CacheWrite)?;
                let mut writer = tokio::io::BufWriter::new(fs_err::tokio::File::from_std(
                    fs_err::File::from_parts(temp_file, self.build_context.cache().root()),
                ));

                match progress {
                    Some((reporter, progress)) => {
                        let mut reader =
                            ProgressReader::new(reader.compat(), progress, &**reporter);

                        tokio::io::copy(&mut reader, &mut writer)
                            .await
                            .map_err(Error::CacheWrite)?;
                    }
                    None => {
                        tokio::io::copy(&mut reader.compat(), &mut writer)
                            .await
                            .map_err(Error::CacheWrite)?;
                    }
                }

                // Rewind the temp file so extraction reads from the start.
                let temp_dir = tempfile::tempdir_in(self.build_context.cache().root())
                    .map_err(Error::CacheWrite)?;
                let mut file = writer.into_inner();
                file.seek(io::SeekFrom::Start(0))
                    .await
                    .map_err(Error::CacheWrite)?;

                let (files, hashes) = if hashes.is_none() {
                    // No hashing required: extract on a blocking thread using
                    // the synchronous (seekable) extractors.
                    let file = file.into_std().await;
                    let target = temp_dir.path().to_owned();
                    let files = tokio::task::spawn_blocking(move || match extension {
                        WheelExtension::Whl => uv_extract::unzip(file, &target),
                        WheelExtension::WhlZst => uv_extract::stream::untar_zst_file(file, &target),
                    })
                    .await?
                    .map_err(|err| Error::Extract(filename.to_string(), err))?;

                    (files, HashDigests::empty())
                } else {
                    // Hashing required: re-read the file through the hashers
                    // while extracting via the streaming extractors.
                    let algorithms = hashes.algorithms();
                    let mut hashers = algorithms.into_iter().map(Hasher::from).collect::<Vec<_>>();
                    let mut hasher = uv_extract::hash::HashReader::new(file, &mut hashers);

                    let files = match extension {
                        WheelExtension::Whl => {
                            uv_extract::stream::unzip(query_url, &mut hasher, temp_dir.path())
                                .await
                                .map_err(|err| Error::Extract(filename.to_string(), err))?
                        }
                        WheelExtension::WhlZst => {
                            uv_extract::stream::untar_zst(&mut hasher, temp_dir.path())
                                .await
                                .map_err(|err| Error::Extract(filename.to_string(), err))?
                        }
                    };

                    // Drain trailing bytes so digests cover the full file.
                    hasher.finish().await.map_err(Error::HashExhaustion)?;
                    let hashes = hashers.into_iter().map(HashDigest::from).collect();

                    (files, hashes)
                };

                validate_and_heal_record(temp_dir.path(), files.iter(), dist)
                    .map_err(Error::InstallWheelError)?;

                // NOTE(review): `persist` is a write; mapping its error to
                // `Error::CacheRead` looks inconsistent with `load_wheel` /
                // `unzip_wheel`, which use `CacheWrite` — confirm intent.
                let id = self
                    .build_context
                    .cache()
                    .persist(temp_dir.keep(), wheel_entry.path())
                    .await
                    .map_err(Error::CacheRead)?;

                if let Some((reporter, progress)) = progress {
                    reporter.on_download_complete(dist.name(), progress);
                }

                Ok(Archive::new(id, hashes, filename.clone()))
            }
            .instrument(info_span!("wheel", wheel = %dist))
        };

        let req = self.request(url.clone())?;

        // Cache-control: an index-level override wins; otherwise derive it
        // from the cache entry's freshness. Offline mode always allows stale.
        let cache_control = match self.client.unmanaged.connectivity() {
            Connectivity::Online => {
                if let Some(header) = index.and_then(|index| {
                    self.build_context
                        .locations()
                        .artifact_cache_control_for(index)
                }) {
                    CacheControl::Override(header)
                } else {
                    CacheControl::from(
                        self.build_context
                            .cache()
                            .freshness(&http_entry, Some(&filename.name), None)
                            .map_err(Error::CacheRead)?,
                    )
                }
            }
            Connectivity::Offline => CacheControl::AllowStale,
        };

        let archive = self
            .client
            .managed(|client| {
                client.cached_client().get_serde_with_retry(
                    req,
                    &http_entry,
                    cache_control.clone(),
                    download,
                )
            })
            .await
            .map_err(|err| match err {
                CachedClientError::Callback { err, .. } => err,
                CachedClientError::Client(err) => Error::Client(err),
            })?;

        // Discard the cached archive if it lacks the required digests or its
        // unpacked files have been evicted from the cache.
        let archive = Some(archive)
            .filter(|archive| archive.has_digests(hashes))
            .filter(|archive| archive.exists(self.build_context.cache()));

        // Re-download with the cache bypassed if the cached result is unusable.
        let archive = if let Some(archive) = archive {
            archive
        } else {
            self.client
                .managed(async |client| {
                    client
                        .cached_client()
                        .skip_cache_with_retry(
                            self.request(url)?,
                            &http_entry,
                            cache_control,
                            download,
                        )
                        .await
                        .map_err(|err| match err {
                            CachedClientError::Callback { err, .. } => err,
                            CachedClientError::Client(err) => Error::Client(err),
                        })
                })
                .await?
        };

        Ok(archive)
    }
1023
    /// Load a wheel from a local path, unzipping it into the cache.
    ///
    /// A `.rev` pointer records the source file's timestamp and the resulting
    /// archive; it is reused only if the timestamp still matches and the
    /// recorded archive satisfies the hash policy.
    async fn load_wheel(
        &self,
        path: &Path,
        filename: &WheelFilename,
        extension: WheelExtension,
        wheel_entry: CacheEntry,
        dist: &BuiltDist,
        hashes: HashPolicy<'_>,
    ) -> Result<LocalWheel, Error> {
        // On Windows, serialize concurrent writers of the same wheel entry.
        #[cfg(windows)]
        let _lock = {
            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
            lock_entry.lock().await.map_err(Error::CacheLock)?
        };

        // Determine the last-modified time of the source wheel on disk.
        let modified = Timestamp::from_path(path).map_err(Error::CacheRead)?;

        // Attempt to read a cached pointer for this wheel.
        let pointer_entry = wheel_entry.with_file(format!("{}.rev", filename.cache_key()));
        let pointer = LocalArchivePointer::read_from(&pointer_entry)?;

        // The pointer is only usable if it's up-to-date and already carries
        // the digests the hash policy requires.
        let archive = pointer
            .filter(|pointer| pointer.is_up_to_date(modified))
            .map(LocalArchivePointer::into_archive)
            .filter(|archive| archive.has_digests(hashes));

        if let Some(archive) = archive {
            // Cache hit: reuse the existing unpacked archive.
            Ok(LocalWheel {
                dist: Dist::Built(dist.clone()),
                archive: self
                    .build_context
                    .cache()
                    .archive(&archive.id)
                    .into_boxed_path(),
                hashes: archive.hashes,
                filename: filename.clone(),
                cache: CacheInfo::from_timestamp(modified),
                build: None,
            })
        } else if hashes.is_none() {
            // No hashing required: unzip on a blocking thread, then record a
            // fresh pointer (with empty digests) for future reuse.
            let archive = Archive::new(
                self.unzip_wheel(path, wheel_entry.path(), DistRef::Built(dist))
                    .await?,
                HashDigests::empty(),
                filename.clone(),
            );

            let pointer = LocalArchivePointer {
                timestamp: modified,
                archive: archive.clone(),
            };
            pointer.write_to(&pointer_entry).await?;

            Ok(LocalWheel {
                dist: Dist::Built(dist.clone()),
                archive: self
                    .build_context
                    .cache()
                    .archive(&archive.id)
                    .into_boxed_path(),
                hashes: archive.hashes,
                filename: filename.clone(),
                cache: CacheInfo::from_timestamp(modified),
                build: None,
            })
        } else {
            // Hashing required: stream the file through the hashers while
            // extracting, then persist and record a pointer with the digests.
            let file = fs_err::tokio::File::open(path)
                .await
                .map_err(Error::CacheRead)?;
            let temp_dir = tempfile::tempdir_in(self.build_context.cache().root())
                .map_err(Error::CacheWrite)?;

            let algorithms = hashes.algorithms();
            let mut hashers = algorithms.into_iter().map(Hasher::from).collect::<Vec<_>>();
            let mut hasher = uv_extract::hash::HashReader::new(file, &mut hashers);

            let files = match extension {
                WheelExtension::Whl => {
                    uv_extract::stream::unzip(path.display(), &mut hasher, temp_dir.path())
                        .await
                        .map_err(|err| Error::Extract(filename.to_string(), err))?
                }
                WheelExtension::WhlZst => {
                    uv_extract::stream::untar_zst(&mut hasher, temp_dir.path())
                        .await
                        .map_err(|err| Error::Extract(filename.to_string(), err))?
                }
            };

            // Drain trailing bytes so the digests cover the entire file.
            hasher.finish().await.map_err(Error::HashExhaustion)?;

            let hashes = hashers.into_iter().map(HashDigest::from).collect();

            validate_and_heal_record(temp_dir.path(), files.iter(), dist)
                .map_err(Error::InstallWheelError)?;

            let id = self
                .build_context
                .cache()
                .persist(temp_dir.keep(), wheel_entry.path())
                .await
                .map_err(Error::CacheWrite)?;

            let archive = Archive::new(id, hashes, filename.clone());

            let pointer = LocalArchivePointer {
                timestamp: modified,
                archive: archive.clone(),
            };
            pointer.write_to(&pointer_entry).await?;

            Ok(LocalWheel {
                dist: Dist::Built(dist.clone()),
                archive: self
                    .build_context
                    .cache()
                    .archive(&archive.id)
                    .into_boxed_path(),
                hashes: archive.hashes,
                filename: filename.clone(),
                cache: CacheInfo::from_timestamp(modified),
                build: None,
            })
        }
    }
1164
    /// Unzip a wheel from `path` into the cache, linking it at `target`, and
    /// return the resulting archive ID.
    async fn unzip_wheel(
        &self,
        path: &Path,
        target: &Path,
        dist: DistRef<'_>,
    ) -> Result<ArchiveId, Error> {
        // Unzip on a blocking thread; extract into a temp dir inside the
        // cache root so the final persist is a same-filesystem rename.
        let (temp_dir, files) = tokio::task::spawn_blocking({
            let path = path.to_owned();
            let root = self.build_context.cache().root().to_path_buf();
            move || -> Result<_, Error> {
                let temp_dir = tempfile::tempdir_in(root).map_err(Error::CacheWrite)?;
                // NOTE(review): `File::open` is a read, but its error is
                // mapped to `Error::CacheWrite` — confirm intent.
                let reader = fs_err::File::open(&path).map_err(Error::CacheWrite)?;
                let files = uv_extract::unzip(reader, temp_dir.path())
                    .map_err(|err| Error::Extract(path.to_string_lossy().into_owned(), err))?;
                Ok((temp_dir, files))
            }
        })
        .await??;

        validate_and_heal_record(temp_dir.path(), files.iter(), dist)
            .map_err(Error::InstallWheelError)?;

        // Persist the unpacked archive into the cache at `target`.
        let id = self
            .build_context
            .cache()
            .persist(temp_dir.keep(), target)
            .await
            .map_err(Error::CacheWrite)?;

        Ok(id)
    }
1200
1201 fn request(&self, url: DisplaySafeUrl) -> Result<reqwest::Request, reqwest::Error> {
1203 self.client
1204 .unmanaged
1205 .uncached_client(&url)
1206 .get(Url::from(url))
1207 .header(
1208 "accept-encoding",
1212 reqwest::header::HeaderValue::from_static("identity"),
1213 )
1214 .build()
1215 }
1216
    /// Return the [`ManagedClient`] used by this database.
    pub fn client(&self) -> &ManagedClient<'a> {
        &self.client
    }
1221}
1222
/// A wrapper around a [`RegistryClient`] that bounds concurrent operations
/// with a semaphore (see [`ManagedClient::managed`]).
pub struct ManagedClient<'a> {
    /// The underlying client; using it directly bypasses the concurrency limit.
    pub unmanaged: &'a RegistryClient,
    /// Semaphore limiting the number of in-flight managed operations.
    control: Arc<Semaphore>,
}
1228
impl<'a> ManagedClient<'a> {
    /// Create a new managed client from the unmanaged client and semaphore.
    fn new(client: &'a RegistryClient, control: Arc<Semaphore>) -> Self {
        ManagedClient {
            unmanaged: client,
            control,
        }
    }

    /// Run `f` while holding a semaphore permit, bounding concurrency.
    ///
    /// The permit is held for the entire duration of the returned future.
    pub async fn managed<F, T>(&self, f: impl FnOnce(&'a RegistryClient) -> F) -> T
    where
        F: Future<Output = T>,
    {
        // `acquire` only errors if the semaphore is closed, which this type
        // never does — hence the `unwrap`.
        let _permit = self.control.acquire().await.unwrap();
        f(self.unmanaged).await
    }

    /// Run `f` with direct access to the client and the semaphore, for
    /// callers that need to manage permit acquisition themselves.
    pub async fn manual<F, T>(&'a self, f: impl FnOnce(&'a RegistryClient, &'a Semaphore) -> F) -> T
    where
        F: Future<Output = T>,
    {
        f(self.unmanaged, &self.control).await
    }
}
1264
1265fn content_length(response: &reqwest::Response) -> Option<u64> {
1267 response
1268 .headers()
1269 .get(reqwest::header::CONTENT_LENGTH)
1270 .and_then(|val| val.to_str().ok())
1271 .and_then(|val| val.parse::<u64>().ok())
1272}
1273
/// An asynchronous reader that reports progress to a [`Reporter`] as bytes
/// are read.
struct ProgressReader<'a, R> {
    /// The underlying reader.
    reader: R,
    /// The progress handle returned by `Reporter::on_download_start`.
    index: usize,
    /// The reporter to notify as bytes are read.
    reporter: &'a dyn Reporter,
}
1280
1281impl<'a, R> ProgressReader<'a, R> {
1282 fn new(reader: R, index: usize, reporter: &'a dyn Reporter) -> Self {
1284 Self {
1285 reader,
1286 index,
1287 reporter,
1288 }
1289 }
1290}
1291
impl<R> AsyncRead for ProgressReader<'_, R>
where
    R: AsyncRead + Unpin,
{
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Delegate to the inner reader, then report progress on success.
        // NOTE(review): this reports `buf.filled().len()` — the total bytes
        // currently in the buffer, not the delta from this single poll. That
        // is only a per-read byte count if callers pass in an empty `ReadBuf`
        // each call (as `tokio::io::copy` does) — confirm for other callers.
        Pin::new(&mut self.as_mut().reader)
            .poll_read(cx, buf)
            .map_ok(|()| {
                self.reporter
                    .on_download_progress(self.index, buf.filled().len() as u64);
            })
    }
}
1309
/// A pointer, stored alongside an HTTP cache policy, to an unpacked wheel
/// archive in the cache.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct HttpArchivePointer {
    // The archive (ID, digests, filename) the pointer refers to.
    archive: Archive,
}
1317
1318impl HttpArchivePointer {
1319 pub fn read_from(path: impl AsRef<Path>) -> Result<Option<Self>, Error> {
1321 match fs_err::File::open(path.as_ref()) {
1322 Ok(file) => {
1323 let data = DataWithCachePolicy::from_reader(file)?.data;
1324 let archive = rmp_serde::from_slice::<Archive>(&data)?;
1325 Ok(Some(Self { archive }))
1326 }
1327 Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
1328 Err(err) => Err(Error::CacheRead(err)),
1329 }
1330 }
1331
1332 pub fn into_archive(self) -> Archive {
1334 self.archive
1335 }
1336
1337 pub fn to_cache_info(&self) -> CacheInfo {
1339 CacheInfo::default()
1340 }
1341
1342 pub fn to_build_info(&self) -> Option<BuildInfo> {
1344 None
1345 }
1346}
1347
/// A pointer to an unpacked wheel archive in the cache, tagged with the
/// timestamp of the local file it was produced from.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct LocalArchivePointer {
    // Last-modified time of the source wheel when the pointer was written.
    timestamp: Timestamp,
    // The archive (ID, digests, filename) the pointer refers to.
    archive: Archive,
}
1356
1357impl LocalArchivePointer {
1358 pub fn read_from(path: impl AsRef<Path>) -> Result<Option<Self>, Error> {
1360 match fs_err::read(path) {
1361 Ok(cached) => Ok(Some(rmp_serde::from_slice::<Self>(&cached)?)),
1362 Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(None),
1363 Err(err) => Err(Error::CacheRead(err)),
1364 }
1365 }
1366
1367 pub async fn write_to(&self, entry: &CacheEntry) -> Result<(), Error> {
1369 write_atomic(entry.path(), rmp_serde::to_vec(&self)?)
1370 .await
1371 .map_err(Error::CacheWrite)
1372 }
1373
1374 pub fn is_up_to_date(&self, modified: Timestamp) -> bool {
1376 self.timestamp == modified
1377 }
1378
1379 pub fn into_archive(self) -> Archive {
1381 self.archive
1382 }
1383
1384 pub fn to_cache_info(&self) -> CacheInfo {
1386 CacheInfo::from_timestamp(self.timestamp)
1387 }
1388
1389 pub fn to_build_info(&self) -> Option<BuildInfo> {
1391 None
1392 }
1393}
1394
/// The resolved download target for a registry wheel: the URL to fetch, the
/// on-the-wire archive format, and the expected size (if known).
#[derive(Debug, Clone)]
struct WheelTarget {
    /// The URL from which to download the artifact.
    url: DisplaySafeUrl,
    /// The archive format of the artifact at `url`.
    extension: WheelExtension,
    /// The expected size of the artifact, if known.
    size: Option<u64>,
}
1404
1405impl TryFrom<&File> for WheelTarget {
1406 type Error = ToUrlError;
1407
1408 fn try_from(file: &File) -> Result<Self, Self::Error> {
1410 let url = file.url.to_url()?;
1411 if let Some(zstd) = file.zstd.as_ref() {
1412 Ok(Self {
1413 url: add_tar_zst_extension(url),
1414 extension: WheelExtension::WhlZst,
1415 size: zstd.size,
1416 })
1417 } else {
1418 Ok(Self {
1419 url,
1420 extension: WheelExtension::Whl,
1421 size: file.size,
1422 })
1423 }
1424 }
1425}
1426
/// The archive format of a wheel artifact on the wire.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum WheelExtension {
    /// A standard `.whl` (zip) archive.
    Whl,
    /// A zstandard-compressed tarball variant.
    WhlZst,
}
1434
1435#[must_use]
1437fn add_tar_zst_extension(mut url: DisplaySafeUrl) -> DisplaySafeUrl {
1438 let mut path = url.path().to_string();
1439
1440 if !path.ends_with(".tar.zst") {
1441 path.push_str(".tar.zst");
1442 }
1443
1444 url.set_path(&path);
1445 url
1446}
1447
#[cfg(test)]
mod tests {
    use super::*;

    /// `add_tar_zst_extension` should append `.tar.zst` exactly once, and
    /// leave percent-encoded path segments intact.
    #[test]
    fn test_add_tar_zst_extension() {
        // Plain wheel URL: suffix is appended.
        let url =
            DisplaySafeUrl::parse("https://files.pythonhosted.org/flask-3.1.0-py3-none-any.whl")
                .unwrap();
        assert_eq!(
            add_tar_zst_extension(url).as_str(),
            "https://files.pythonhosted.org/flask-3.1.0-py3-none-any.whl.tar.zst"
        );

        // Already-suffixed URL: idempotent, no double suffix.
        let url = DisplaySafeUrl::parse(
            "https://files.pythonhosted.org/flask-3.1.0-py3-none-any.whl.tar.zst",
        )
        .unwrap();
        assert_eq!(
            add_tar_zst_extension(url).as_str(),
            "https://files.pythonhosted.org/flask-3.1.0-py3-none-any.whl.tar.zst"
        );

        // Percent-encoded local-version separator (`+` as `%2B`) is preserved.
        let url = DisplaySafeUrl::parse(
            "https://files.pythonhosted.org/flask-3.1.0%2Bcu124-py3-none-any.whl",
        )
        .unwrap();
        assert_eq!(
            add_tar_zst_extension(url).as_str(),
            "https://files.pythonhosted.org/flask-3.1.0%2Bcu124-py3-none-any.whl.tar.zst"
        );
    }
}