-rw-r--r--   .pre-commit-config.yaml                    |   1
-rw-r--r--   src/pip/_internal/index/collector.py       |  79
-rw-r--r--   src/pip/_internal/index/package_finder.py  | 163
3 files changed, 130 insertions(+), 113 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8882f6bbe..21bc7009a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -22,7 +22,6 @@ repos:
- id: black
exclude: |
(?x)
- ^src/pip/_internal/index|
^src/pip/_internal/models|
^src/pip/_internal/operations|
^src/pip/_internal/vcs|
diff --git a/src/pip/_internal/index/collector.py b/src/pip/_internal/index/collector.py
index 14d745eef..ddb7678b5 100644
--- a/src/pip/_internal/index/collector.py
+++ b/src/pip/_internal/index/collector.py
@@ -52,7 +52,7 @@ def _match_vcs_scheme(url: str) -> Optional[str]:
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
- if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
+ if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
return scheme
return None
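
Note on the hunk above: a prefix only counts as a VCS scheme when the character right after it is "+" or ":", so "git+https://..." matches while a URL that merely starts with the letters "git" does not. A minimal standalone sketch of the same check, using an assumed scheme list in place of pip's vcs.schemes:

    from typing import Optional

    SCHEMES = ["git", "hg", "svn", "bzr"]  # assumed subset of pip's vcs.schemes

    def match_vcs_scheme(url: str) -> Optional[str]:
        """Return the matched VCS scheme, or None if there's no match."""
        for scheme in SCHEMES:
            # Require "+" or ":" right after the scheme so that a URL that
            # merely starts with the letters "git" is not treated as VCS.
            if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
                return scheme
        return None

    assert match_vcs_scheme("git+https://example.com/repo.git") == "git"
    assert match_vcs_scheme("svn://example.com/repo") == "svn"
    assert match_vcs_scheme("github.com/pypa/pip") is None
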
@@ -85,7 +85,7 @@ def _ensure_html_response(url: str, session: PipSession) -> None:
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
- if scheme not in {'http', 'https'}:
+ if scheme not in {"http", "https"}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
@@ -110,7 +110,7 @@ def _get_html_response(url: str, session: PipSession) -> Response:
if is_archive_file(Link(url).filename):
_ensure_html_response(url, session=session)
- logger.debug('Getting page %s', redact_auth_from_url(url))
+ logger.debug("Getting page %s", redact_auth_from_url(url))
resp = session.get(
url,
@@ -145,12 +145,11 @@ def _get_html_response(url: str, session: PipSession) -> Response:
def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
- """Determine if we have any encoding information in our headers.
- """
+ """Determine if we have any encoding information in our headers."""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
- return params['charset']
+ return params["charset"]
return None
@@ -195,7 +194,7 @@ def _clean_file_url_path(part: str) -> str:
# percent-encoded: /
-_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
+_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
def _clean_url_path(path: str, is_local_path: bool) -> str:
@@ -212,12 +211,12 @@ def _clean_url_path(path: str, is_local_path: bool) -> str:
parts = _reserved_chars_re.split(path)
cleaned_parts = []
- for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
+ for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
- return ''.join(cleaned_parts)
+ return "".join(cleaned_parts)
def _clean_link(url: str) -> str:
@@ -248,10 +247,10 @@ def _create_link_from_element(
return None
url = _clean_link(urllib.parse.urljoin(base_url, href))
- pyrequire = anchor.get('data-requires-python')
+ pyrequire = anchor.get("data-requires-python")
pyrequire = html.unescape(pyrequire) if pyrequire else None
- yanked_reason = anchor.get('data-yanked')
+ yanked_reason = anchor.get("data-yanked")
if yanked_reason:
yanked_reason = html.unescape(yanked_reason)
@@ -271,8 +270,7 @@ class CacheablePageContent:
self.page = page
def __eq__(self, other: object) -> bool:
- return (isinstance(other, type(self)) and
- self.page.url == other.page.url)
+ return isinstance(other, type(self)) and self.page.url == other.page.url
def __hash__(self) -> int:
return hash(self.page.url)
@@ -353,7 +351,7 @@ class HTMLPage:
def _handle_get_page_fail(
link: Link,
reason: Union[str, Exception],
- meth: Optional[Callable[..., None]] = None
+ meth: Optional[Callable[..., None]] = None,
) -> None:
if meth is None:
meth = logger.debug
@@ -366,7 +364,8 @@ def _make_html_page(response: Response, cache_link_parsing: bool = True) -> HTML
response.content,
encoding=encoding,
url=response.url,
- cache_link_parsing=cache_link_parsing)
+ cache_link_parsing=cache_link_parsing,
+ )
def _get_html_page(
@@ -377,37 +376,43 @@ def _get_html_page(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
- url = link.url.split('#', 1)[0]
+ url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
- logger.warning('Cannot look at %s URL %s because it does not support '
- 'lookup as web pages.', vcs_scheme, link)
+ logger.warning(
+ "Cannot look at %s URL %s because it does not support lookup as web pages.",
+ vcs_scheme,
+ link,
+ )
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
- if (scheme == 'file' and os.path.isdir(urllib.request.url2pathname(path))):
+ if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
- if not url.endswith('/'):
- url += '/'
- url = urllib.parse.urljoin(url, 'index.html')
- logger.debug(' file: URL is directory, getting %s', url)
+ if not url.endswith("/"):
+ url += "/"
+ url = urllib.parse.urljoin(url, "index.html")
+ logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.warning(
- 'Skipping page %s because it looks like an archive, and cannot '
- 'be checked by a HTTP HEAD request.', link,
+ "Skipping page %s because it looks like an archive, and cannot "
+ "be checked by a HTTP HEAD request.",
+ link,
)
except _NotHTML as exc:
logger.warning(
- 'Skipping page %s because the %s request got Content-Type: %s.'
- 'The only supported Content-Type is text/html',
- link, exc.request_desc, exc.content_type,
+ "Skipping page %s because the %s request got Content-Type: %s."
+ "The only supported Content-Type is text/html",
+ link,
+ exc.request_desc,
+ exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_page_fail(link, exc)
@@ -422,8 +427,7 @@ def _get_html_page(
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
- return _make_html_page(resp,
- cache_link_parsing=link.cache_link_parsing)
+ return _make_html_page(resp, cache_link_parsing=link.cache_link_parsing)
return None
@@ -451,9 +455,10 @@ class LinkCollector:
@classmethod
def create(
- cls, session: PipSession,
+ cls,
+ session: PipSession,
options: Values,
- suppress_no_index: bool = False
+ suppress_no_index: bool = False,
) -> "LinkCollector":
"""
:param session: The Session to use to make requests.
@@ -463,8 +468,8 @@ class LinkCollector:
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
- 'Ignoring indexes: %s',
- ','.join(redact_auth_from_url(url) for url in index_urls),
+ "Ignoring indexes: %s",
+ ",".join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
@@ -472,10 +477,12 @@ class LinkCollector:
find_links = options.find_links or []
search_scope = SearchScope.create(
- find_links=find_links, index_urls=index_urls,
+ find_links=find_links,
+ index_urls=index_urls,
)
link_collector = LinkCollector(
- session=session, search_scope=search_scope,
+ session=session,
+ search_scope=search_scope,
)
return link_collector
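
Note on the _clean_url_path hunk above: re.split with a capturing group returns the pieces to clean interleaved with the reserved separators ("@" and "%2F"), and appending a trailing empty string keeps those pieces paired up. A minimal standalone sketch of that pattern, with a local pairwise helper standing in for pip's own and urllib quote/unquote standing in for the non-local clean function:

    import itertools
    import re
    import urllib.parse
    from typing import Iterable, Iterator, Tuple

    # Same pattern as in collector.py: "@" and "%2F" are kept, everything
    # between them gets re-quoted.
    _reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)

    def pairwise(iterable: Iterable[str]) -> Iterator[Tuple[str, str]]:
        # Stand-in for pip's own pairwise helper: s -> (s0, s1), (s2, s3), ...
        it = iter(iterable)
        return zip(it, it)

    def clean_url_path(path: str) -> str:
        # re.split with a capturing group alternates "text to clean" and
        # "reserved separator"; the trailing "" keeps those pairs aligned
        # when the path ends with plain text rather than a separator.
        parts = _reserved_chars_re.split(path)
        cleaned_parts = []
        for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
            cleaned_parts.append(urllib.parse.quote(urllib.parse.unquote(to_clean)))
            # Normalize %xx escapes (e.g. %2f -> %2F)
            cleaned_parts.append(reserved.upper())
        return "".join(cleaned_parts)

    print(clean_url_path("/simple/some proj/@v1/file%2fname"))
    # -> /simple/some%20proj/@v1/file%2Fname
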
diff --git a/src/pip/_internal/index/package_finder.py b/src/pip/_internal/index/package_finder.py
index 2dadb5aef..4f0e642f3 100644
--- a/src/pip/_internal/index/package_finder.py
+++ b/src/pip/_internal/index/package_finder.py
@@ -39,15 +39,13 @@ from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
from pip._internal.utils.urls import url_to_path
-__all__ = ['FormatControl', 'BestCandidateResult', 'PackageFinder']
+__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
logger = getLogger(__name__)
BuildTag = Union[Tuple[()], Tuple[int, str]]
-CandidateSortingKey = (
- Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
-)
+CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
def _check_link_requires_python(
@@ -66,27 +64,32 @@ def _check_link_requires_python(
"""
try:
is_compatible = check_requires_python(
- link.requires_python, version_info=version_info,
+ link.requires_python,
+ version_info=version_info,
)
except specifiers.InvalidSpecifier:
logger.debug(
"Ignoring invalid Requires-Python (%r) for link: %s",
- link.requires_python, link,
+ link.requires_python,
+ link,
)
else:
if not is_compatible:
- version = '.'.join(map(str, version_info))
+ version = ".".join(map(str, version_info))
if not ignore_requires_python:
logger.verbose(
- 'Link requires a different Python (%s not in: %r): %s',
- version, link.requires_python, link,
+ "Link requires a different Python (%s not in: %r): %s",
+ version,
+ link.requires_python,
+ link,
)
return False
logger.debug(
- 'Ignoring failed Requires-Python check (%s not in: %r) '
- 'for link: %s',
- version, link.requires_python, link,
+ "Ignoring failed Requires-Python check (%s not in: %r) " "for link: %s",
+ version,
+ link.requires_python,
+ link,
)
return True
@@ -98,7 +101,7 @@ class LinkEvaluator:
Responsible for evaluating links for a particular project.
"""
- _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
+ _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
# Don't include an allow_yanked default value to make sure each call
# site considers whether yanked releases are allowed. This also causes
@@ -152,8 +155,8 @@ class LinkEvaluator:
"""
version = None
if link.is_yanked and not self._allow_yanked:
- reason = link.yanked_reason or '<none given>'
- return (False, f'yanked for reason: {reason}')
+ reason = link.yanked_reason or "<none given>"
+ return (False, f"yanked for reason: {reason}")
if link.egg_fragment:
egg_info = link.egg_fragment
@@ -161,23 +164,21 @@ class LinkEvaluator:
else:
egg_info, ext = link.splitext()
if not ext:
- return (False, 'not a file')
+ return (False, "not a file")
if ext not in SUPPORTED_EXTENSIONS:
- return (False, f'unsupported archive format: {ext}')
+ return (False, f"unsupported archive format: {ext}")
if "binary" not in self._formats and ext == WHEEL_EXTENSION:
- reason = 'No binaries permitted for {}'.format(
- self.project_name)
+ reason = "No binaries permitted for {}".format(self.project_name)
return (False, reason)
- if "macosx10" in link.path and ext == '.zip':
- return (False, 'macosx10 one')
+ if "macosx10" in link.path and ext == ".zip":
+ return (False, "macosx10 one")
if ext == WHEEL_EXTENSION:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
- return (False, 'invalid wheel filename')
+ return (False, "invalid wheel filename")
if canonicalize_name(wheel.name) != self._canonical_name:
- reason = 'wrong project name (not {})'.format(
- self.project_name)
+ reason = "wrong project name (not {})".format(self.project_name)
return (False, reason)
supported_tags = self._target_python.get_tags()
@@ -188,7 +189,7 @@ class LinkEvaluator:
reason = (
"none of the wheel's tags ({}) are compatible "
"(run pip debug --verbose to show compatible tags)".format(
- ', '.join(file_tags)
+ ", ".join(file_tags)
)
)
return (False, reason)
@@ -197,26 +198,28 @@ class LinkEvaluator:
# This should be up by the self.ok_binary check, but see issue 2700.
if "source" not in self._formats and ext != WHEEL_EXTENSION:
- reason = f'No sources permitted for {self.project_name}'
+ reason = f"No sources permitted for {self.project_name}"
return (False, reason)
if not version:
version = _extract_version_from_fragment(
- egg_info, self._canonical_name,
+ egg_info,
+ self._canonical_name,
)
if not version:
- reason = f'Missing project version for {self.project_name}'
+ reason = f"Missing project version for {self.project_name}"
return (False, reason)
match = self._py_version_re.search(version)
if match:
- version = version[:match.start()]
+ version = version[: match.start()]
py_version = match.group(1)
if py_version != self._target_python.py_version:
- return (False, 'Python version is incorrect')
+ return (False, "Python version is incorrect")
supports_python = _check_link_requires_python(
- link, version_info=self._target_python.py_version_info,
+ link,
+ version_info=self._target_python.py_version_info,
ignore_requires_python=self._ignore_requires_python,
)
if not supports_python:
@@ -224,7 +227,7 @@ class LinkEvaluator:
# _log_skipped_link().
return (False, None)
- logger.debug('Found link %s, version: %s', link, version)
+ logger.debug("Found link %s, version: %s", link, version)
return (True, version)
@@ -251,8 +254,8 @@ def filter_unallowed_hashes(
"""
if not hashes:
logger.debug(
- 'Given no hashes to check %s links for project %r: '
- 'discarding no candidates',
+ "Given no hashes to check %s links for project %r: "
+ "discarding no candidates",
len(candidates),
project_name,
)
@@ -282,22 +285,22 @@ def filter_unallowed_hashes(
filtered = list(candidates)
if len(filtered) == len(candidates):
- discard_message = 'discarding no candidates'
+ discard_message = "discarding no candidates"
else:
- discard_message = 'discarding {} non-matches:\n {}'.format(
+ discard_message = "discarding {} non-matches:\n {}".format(
len(non_matches),
- '\n '.join(str(candidate.link) for candidate in non_matches)
+ "\n ".join(str(candidate.link) for candidate in non_matches),
)
logger.debug(
- 'Checked %s links for project %r against %s hashes '
- '(%s matches, %s no digest): %s',
+ "Checked %s links for project %r against %s hashes "
+ "(%s matches, %s no digest): %s",
len(candidates),
project_name,
hashes.digest_count,
match_count,
len(matches_or_no_digest) - match_count,
- discard_message
+ discard_message,
)
return filtered
@@ -354,13 +357,11 @@ class BestCandidateResult:
self.best_candidate = best_candidate
def iter_all(self) -> Iterable[InstallationCandidate]:
- """Iterate through all candidates.
- """
+ """Iterate through all candidates."""
return iter(self._candidates)
def iter_applicable(self) -> Iterable[InstallationCandidate]:
- """Iterate through the applicable candidates.
- """
+ """Iterate through the applicable candidates."""
return iter(self._applicable_candidates)
@@ -444,7 +445,8 @@ class CandidateEvaluator:
allow_prereleases = self._allow_all_prereleases or None
specifier = self._specifier
versions = {
- str(v) for v in specifier.filter(
+ str(v)
+ for v in specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
@@ -458,9 +460,7 @@ class CandidateEvaluator:
}
# Again, converting version to str to deal with debundling.
- applicable_candidates = [
- c for c in candidates if str(c.version) in versions
- ]
+ applicable_candidates = [c for c in candidates if str(c.version) in versions]
filtered_applicable_candidates = filter_unallowed_hashes(
candidates=applicable_candidates,
@@ -509,9 +509,11 @@ class CandidateEvaluator:
# can raise InvalidWheelFilename
wheel = Wheel(link.filename)
try:
- pri = -(wheel.find_most_preferred_tag(
- valid_tags, self._wheel_tag_preferences
- ))
+ pri = -(
+ wheel.find_most_preferred_tag(
+ valid_tags, self._wheel_tag_preferences
+ )
+ )
except ValueError:
raise UnsupportedWheel(
"{} is not a supported wheel for this platform. It "
@@ -520,7 +522,7 @@ class CandidateEvaluator:
if self._prefer_binary:
binary_preference = 1
if wheel.build_tag is not None:
- match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
+ match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
@@ -528,8 +530,12 @@ class CandidateEvaluator:
has_allowed_hash = int(link.is_hash_allowed(self._hashes))
yank_value = -1 * int(link.is_yanked) # -1 for yanked.
return (
- has_allowed_hash, yank_value, binary_preference, candidate.version,
- pri, build_tag,
+ has_allowed_hash,
+ yank_value,
+ binary_preference,
+ candidate.version,
+ pri,
+ build_tag,
)
def sort_best_candidate(
@@ -713,7 +719,7 @@ class PackageFinder:
if link not in self._logged_links:
# Put the link at the end so the reason is more visible and because
# the link string is usually very long.
- logger.debug('Skipping link: %s: %s', reason, link)
+ logger.debug("Skipping link: %s: %s", reason, link)
self._logged_links.add(link)
def get_install_candidate(
@@ -753,7 +759,8 @@ class PackageFinder:
self, project_url: Link, link_evaluator: LinkEvaluator
) -> List[InstallationCandidate]:
logger.debug(
- 'Fetching project page and analyzing links: %s', project_url,
+ "Fetching project page and analyzing links: %s",
+ project_url,
)
html_page = self._link_collector.fetch_page(project_url)
if html_page is None:
@@ -821,8 +828,7 @@ class PackageFinder:
specifier: Optional[specifiers.BaseSpecifier] = None,
hashes: Optional[Hashes] = None,
) -> CandidateEvaluator:
- """Create a CandidateEvaluator object to use.
- """
+ """Create a CandidateEvaluator object to use."""
candidate_prefs = self._candidate_prefs
return CandidateEvaluator.create(
project_name=project_name,
@@ -867,7 +873,9 @@ class PackageFinder:
"""
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
- req.name, specifier=req.specifier, hashes=hashes,
+ req.name,
+ specifier=req.specifier,
+ hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
@@ -880,41 +888,45 @@ class PackageFinder:
# handle different vendoring sources from pip and pkg_resources.
# If we stop using the pkg_resources provided specifier and start
# using our own, we can drop the cast to str().
- return ", ".join(sorted(
- {str(c.version) for c in cand_iter},
- key=parse_version,
- )) or "none"
+ return (
+ ", ".join(
+ sorted(
+ {str(c.version) for c in cand_iter},
+ key=parse_version,
+ )
+ )
+ or "none"
+ )
if installed_version is None and best_candidate is None:
logger.critical(
- 'Could not find a version that satisfies the requirement %s '
- '(from versions: %s)',
+ "Could not find a version that satisfies the requirement %s "
+ "(from versions: %s)",
req,
_format_versions(best_candidate_result.iter_all()),
)
raise DistributionNotFound(
- 'No matching distribution found for {}'.format(
- req)
+ "No matching distribution found for {}".format(req)
)
best_installed = False
if installed_version and (
- best_candidate is None or
- best_candidate.version <= installed_version):
+ best_candidate is None or best_candidate.version <= installed_version
+ ):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
- 'Existing installed version (%s) is most up-to-date and '
- 'satisfies requirement',
+ "Existing installed version (%s) is most up-to-date and "
+ "satisfies requirement",
installed_version,
)
else:
logger.debug(
- 'Existing installed version (%s) satisfies requirement '
- '(most up-to-date version is %s)',
+ "Existing installed version (%s) satisfies requirement "
+ "(most up-to-date version is %s)",
installed_version,
best_candidate.version,
)
@@ -923,15 +935,14 @@ class PackageFinder:
if best_installed:
# We have an existing version, and its the best version
logger.debug(
- 'Installed version (%s) is most up-to-date (past versions: '
- '%s)',
+ "Installed version (%s) is most up-to-date (past versions: " "%s)",
installed_version,
_format_versions(best_candidate_result.iter_applicable()),
)
raise BestVersionAlreadyInstalled
logger.debug(
- 'Using version %s (newest of versions: %s)',
+ "Using version %s (newest of versions: %s)",
best_candidate.version,
_format_versions(best_candidate_result.iter_applicable()),
)