Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/pip/_internal/network/utils.py: 22%
27 statements
« prev ^ index » next coverage.py v7.4.3, created at 2024-02-26 06:33 +0000
« prev ^ index » next coverage.py v7.4.3, created at 2024-02-26 06:33 +0000
1from typing import Dict, Generator
3from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
5from pip._internal.exceptions import NetworkConnectionError
# The following comments and HTTP headers were originally added by
# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
#   and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
#   will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
#   that's already been compressed and compress it again, and set
#   the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're hoping
# to eliminate the third case. Hopefully there does not exist a server
# which when given a file will notice it is already compressed and that
# you're not asking for a compressed file and will then decompress it
# before sending because if that's the case I don't think it'll ever be
# possible to make this work.
# Default request headers: ask for the identity encoding only (see
# rationale above).
HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}
def raise_for_status(resp: Response) -> None:
    """Raise ``NetworkConnectionError`` if *resp* is a 4xx/5xx response.

    Mirrors ``requests.Response.raise_for_status`` but raises pip's own
    exception type so callers can catch a single class for network errors.
    """
    reason = resp.reason
    if isinstance(reason, bytes):
        # Some servers localize their reason strings, so try utf-8 first;
        # iso-8859-1 is the fallback since it can decode any byte sequence.
        try:
            reason = reason.decode("utf-8")
        except UnicodeDecodeError:
            reason = reason.decode("iso-8859-1")

    if 400 <= resp.status_code < 500:
        category = "Client Error"
    elif 500 <= resp.status_code < 600:
        category = "Server Error"
    else:
        # Non-error status: nothing to report.
        return

    raise NetworkConnectionError(
        f"{resp.status_code} {category}: {reason} for url: {resp.url}",
        response=resp,
    )
def response_chunks(
    response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
) -> Generator[bytes, None, None]:
    """Given a requests Response, provide the data chunks."""
    try:
        # Special case for urllib3: its raw response objects expose
        # ``stream``.  We pass decode_content=False because we don't want
        # urllib3 to touch the raw bytes from the server — if it
        # decompressed them, any checksum we compute would be of the
        # decompressed data rather than of the file as served, and
        # whether a Content-Encoding header is present depends entirely
        # on server configuration:
        # - Some servers see a non-compressible file and leave it alone
        #   with an empty Content-Encoding
        # - Some servers see an already-compressed file, leave it alone,
        #   and add a Content-Encoding: gzip header
        # - Some servers compress the already-compressed file again and
        #   set the Content-Encoding: gzip header
        # Keeping decoding off should eliminate problems with the
        # second case.
        raw_stream = response.raw.stream(chunk_size, decode_content=False)
        for piece in raw_stream:
            yield piece
    except AttributeError:
        # Plain file-like object without ``stream``: read it manually
        # until EOF (an empty read).
        while piece := response.raw.read(chunk_size):
            yield piece