Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/urllib3/connectionpool.py: 20%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

350 statements  

1from __future__ import absolute_import 

2 

3import errno 

4import logging 

5import re 

6import socket 

7import sys 

8import warnings 

9from socket import error as SocketError 

10from socket import timeout as SocketTimeout 

11 

12from ._collections import HTTPHeaderDict 

13from .connection import ( 

14 BaseSSLError, 

15 BrokenPipeError, 

16 DummyConnection, 

17 HTTPConnection, 

18 HTTPException, 

19 HTTPSConnection, 

20 VerifiedHTTPSConnection, 

21 port_by_scheme, 

22) 

23from .exceptions import ( 

24 ClosedPoolError, 

25 EmptyPoolError, 

26 HeaderParsingError, 

27 HostChangedError, 

28 InsecureRequestWarning, 

29 LocationValueError, 

30 MaxRetryError, 

31 NewConnectionError, 

32 ProtocolError, 

33 ProxyError, 

34 ReadTimeoutError, 

35 SSLError, 

36 TimeoutError, 

37) 

38from .packages import six 

39from .packages.six.moves import queue 

40from .request import RequestMethods 

41from .response import HTTPResponse 

42from .util.connection import is_connection_dropped 

43from .util.proxy import connection_requires_http_tunnel 

44from .util.queue import LifoQueue 

45from .util.request import set_file_position 

46from .util.response import assert_header_parsing 

47from .util.retry import Retry 

48from .util.ssl_match_hostname import CertificateError 

49from .util.timeout import Timeout 

50from .util.url import Url, _encode_target 

51from .util.url import _normalize_host as normalize_host 

52from .util.url import get_host, parse_url 

53 

try:  # Platform-specific: Python 3
    import weakref

    weakref_finalize = weakref.finalize
except AttributeError:  # Platform-specific: Python 2
    from .packages.backports.weakref_finalize import weakref_finalize

# Python 2/3 compatible range iterator.
xrange = six.moves.xrange

log = logging.getLogger(__name__)

# Sentinel used to distinguish "argument not supplied" from an explicit None.
_Default = object()

66 

67 

68# Pool objects 

class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.

    .. note::
       ConnectionPool.urlopen() does not normalize or percent-encode target URIs
       which is useful if your target server doesn't support percent-encoded
       target URIs.
    """

    scheme = None
    QueueCls = LifoQueue

    def __init__(self, host, port=None):
        if not host:
            raise LocationValueError("No host specified.")

        self.host = _normalize_host(host, scheme=self.scheme)
        # Keep the lowercased, un-normalized host around; the HTTPS pool's
        # _prepare_proxy() sends it in the CONNECT tunnel request.
        self._proxy_host = host.lower()
        self.port = port

    def __str__(self):
        return "{0}(host={1!r}, port={2!r})".format(
            type(self).__name__, self.host, self.port
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # Returning False re-raises any in-flight exception.
        return False

    def close(self):
        """
        Close all pooled connections and disable the pool.
        """

107 

108 

# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
# errno values that signal a non-blocking socket would have blocked.
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}

111 

112 

class HTTPConnectionPool(ConnectionPool, RequestMethods):
    """
    Thread-safe connection pool for one host.

    :param host:
        Host used for this HTTP Connection (e.g. "localhost"), passed into
        :class:`http.client.HTTPConnection`.

    :param port:
        Port used for this HTTP Connection (None is equivalent to 80), passed
        into :class:`http.client.HTTPConnection`.

    :param strict:
        Causes BadStatusLine to be raised if the status line can't be parsed
        as a valid HTTP/1.0 or 1.1 status line, passed into
        :class:`http.client.HTTPConnection`.

        .. note::
           Only works in Python 2. This parameter is ignored in Python 3.

    :param timeout:
        Socket timeout in seconds for each individual connection. This can
        be a float or integer, which sets the timeout for the HTTP request,
        or an instance of :class:`urllib3.util.Timeout` which gives you more
        fine-grained control over request timeouts. After the constructor has
        been parsed, this is always a `urllib3.util.Timeout` object.

    :param maxsize:
        Number of connections to save that can be reused. More than 1 is useful
        in multithreaded situations. If ``block`` is set to False, more
        connections will be created but they will not be saved once they've
        been used.

    :param block:
        If set to True, no more than ``maxsize`` connections will be used at
        a time. When no free connections are available, the call will block
        until a connection has been released. This is a useful side effect for
        particular multithreaded situations where one does not want to use more
        than maxsize connections per host to prevent flooding.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param retries:
        Retry configuration to use by default with requests in this pool.

    :param _proxy:
        Parsed proxy URL, should not be used directly, instead, see
        :class:`urllib3.ProxyManager`

    :param _proxy_headers:
        A dictionary with proxy headers, should not be used directly,
        instead, see :class:`urllib3.ProxyManager`

    :param \\**conn_kw:
        Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
        :class:`urllib3.connection.HTTPSConnection` instances.
    """

    scheme = "http"
    ConnectionCls = HTTPConnection
    ResponseCls = HTTPResponse

176 

    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        _proxy_config=None,
        **conn_kw
    ):
        """Create the pool; all parameters are documented on the class."""
        ConnectionPool.__init__(self, host, port)
        RequestMethods.__init__(self, headers)

        self.strict = strict

        # Normalize a bare int/float timeout into a Timeout object so the
        # rest of the pool can rely on .connect_timeout/.read_timeout.
        if not isinstance(timeout, Timeout):
            timeout = Timeout.from_float(timeout)

        if retries is None:
            retries = Retry.DEFAULT

        self.timeout = timeout
        self.retries = retries

        self.pool = self.QueueCls(maxsize)
        self.block = block

        self.proxy = _proxy
        self.proxy_headers = _proxy_headers or {}
        self.proxy_config = _proxy_config

        # Fill the queue up so that doing get() on it will block properly
        for _ in xrange(maxsize):
            self.pool.put(None)

        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
        self.conn_kw = conn_kw

        if self.proxy:
            # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
            # We cannot know if the user has added default socket options, so we cannot replace the
            # list.
            self.conn_kw.setdefault("socket_options", [])

            self.conn_kw["proxy"] = self.proxy
            self.conn_kw["proxy_config"] = self.proxy_config

        # Do not pass 'self' as callback to 'finalize'.
        # Then the 'finalize' would keep an endless living (leak) to self.
        # By just passing a reference to the pool allows the garbage collector
        # to free self if nobody else has a reference to it.
        pool = self.pool

        # Close all the HTTPConnections in the pool before the
        # HTTPConnectionPool object is garbage collected.
        weakref_finalize(self, _close_pool_connections, pool)

240 

241 def _new_conn(self): 

242 """ 

243 Return a fresh :class:`HTTPConnection`. 

244 """ 

245 self.num_connections += 1 

246 log.debug( 

247 "Starting new HTTP connection (%d): %s:%s", 

248 self.num_connections, 

249 self.host, 

250 self.port or "80", 

251 ) 

252 

253 conn = self.ConnectionCls( 

254 host=self.host, 

255 port=self.port, 

256 timeout=self.timeout.connect_timeout, 

257 strict=self.strict, 

258 **self.conn_kw 

259 ) 

260 return conn 

261 

    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.

        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.

        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError:  # self.pool is None
            raise ClosedPoolError(self, "Pool is closed.")

        except queue.Empty:
            if self.block:
                raise EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                )
            pass  # Oh well, we'll create a new connection then

        # If this is a persistent connection, check if it got disconnected
        if conn and is_connection_dropped(conn):
            log.debug("Resetting dropped connection: %s", self.host)
            conn.close()
            if getattr(conn, "auto_open", 1) == 0:
                # This is a proxied connection that has been mutated by
                # http.client._tunnel() and cannot be reused (since it would
                # attempt to bypass the proxy)
                conn = None

        # A queue slot may hold None (placeholder); replace it with a fresh
        # connection in that case.
        return conn or self._new_conn()

300 

    def _put_conn(self, conn):
        """
        Put a connection back into the pool.

        :param conn:
            Connection object for the current host and port as returned by
            :meth:`._new_conn` or :meth:`._get_conn`.

        If the pool is already full, the connection is closed and discarded
        because we exceeded maxsize. If connections are discarded frequently,
        then maxsize should be increased.

        If the pool is closed, then the connection will be closed and discarded.
        """
        try:
            self.pool.put(conn, block=False)
            return  # Everything is dandy, done.
        except AttributeError:
            # self.pool is None -- the pool was closed; fall through and
            # discard the connection below.
            pass
        except queue.Full:
            # This should never happen if self.block == True
            log.warning(
                "Connection pool is full, discarding connection: %s. Connection pool size: %s",
                self.host,
                self.pool.qsize(),
            )
        # Connection never got put back into the pool, close it.
        if conn:
            conn.close()

331 

332 def _validate_conn(self, conn): 

333 """ 

334 Called right before a request is made, after the socket is created. 

335 """ 

336 pass 

337 

338 def _prepare_proxy(self, conn): 

339 # Nothing to do for HTTP connections. 

340 pass 

341 

342 def _get_timeout(self, timeout): 

343 """Helper that always returns a :class:`urllib3.util.Timeout`""" 

344 if timeout is _Default: 

345 return self.timeout.clone() 

346 

347 if isinstance(timeout, Timeout): 

348 return timeout.clone() 

349 else: 

350 # User passed us an int/float. This is for backwards compatibility, 

351 # can be removed later 

352 return Timeout.from_float(timeout) 

353 

    def _raise_timeout(self, err, url, timeout_value):
        """Is the error actually a timeout? Will raise a ReadTimeout or pass"""

        if isinstance(err, SocketTimeout):
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # See the above comment about EAGAIN in Python 3. In Python 2 we have
        # to specifically catch it and throw the timeout error
        if hasattr(err, "errno") and err.errno in _blocking_errnos:
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

        # Catch possible read timeouts thrown as SSL errors. If not the
        # case, rethrow the original. We need to do this because of:
        # http://bugs.python.org/issue10272
        if "timed out" in str(err) or "did not complete (read)" in str(
            err
        ):  # Python < 2.7.4
            raise ReadTimeoutError(
                self, url, "Read timed out. (read timeout=%s)" % timeout_value
            )

378 

    def _make_request(
        self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
    ):
        """
        Perform a request on a given urllib connection object taken from our
        pool.

        :param conn:
            a connection from one of our connection pools

        :param timeout:
            Socket timeout in seconds for the request. This can be a
            float or integer, which will set the same timeout value for
            the socket connect and the socket read, or an instance of
            :class:`urllib3.util.Timeout`, which gives you more fine-grained
            control over your timeouts.
        """
        self.num_requests += 1

        timeout_obj = self._get_timeout(timeout)
        timeout_obj.start_connect()
        conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)

        # Trigger any extra validation we need to do.
        try:
            self._validate_conn(conn)
        except (SocketTimeout, BaseSSLError) as e:
            # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
            self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
            raise

        # conn.request() calls http.client.*.request, not the method in
        # urllib3.request. It also calls makefile (recv) on the socket.
        try:
            if chunked:
                conn.request_chunked(method, url, **httplib_request_kw)
            else:
                conn.request(method, url, **httplib_request_kw)

        # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
        # legitimately able to close the connection after sending a valid response.
        # With this behaviour, the received response is still readable.
        except BrokenPipeError:
            # Python 3
            pass
        except IOError as e:
            # Python 2 and macOS/Linux
            # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE/ECONNRESET are needed on macOS
            # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
            if e.errno not in {
                errno.EPIPE,
                errno.ESHUTDOWN,
                errno.EPROTOTYPE,
                errno.ECONNRESET,
            }:
                raise

        # Reset the timeout for the recv() on the socket
        read_timeout = timeout_obj.read_timeout

        # App Engine doesn't have a sock attr
        if getattr(conn, "sock", None):
            # In Python 3 socket.py will catch EAGAIN and return None when you
            # try and read into the file pointer created by http.client, which
            # instead raises a BadStatusLine exception. Instead of catching
            # the exception and assuming all BadStatusLine exceptions are read
            # timeouts, check for a zero timeout before making the request.
            if read_timeout == 0:
                raise ReadTimeoutError(
                    self, url, "Read timed out. (read timeout=%s)" % read_timeout
                )
            if read_timeout is Timeout.DEFAULT_TIMEOUT:
                conn.sock.settimeout(socket.getdefaulttimeout())
            else:  # None or a value
                conn.sock.settimeout(read_timeout)

        # Receive the response from the server
        try:
            try:
                # Python 2.7, use buffering of HTTP responses
                httplib_response = conn.getresponse(buffering=True)
            except TypeError:
                # Python 3
                try:
                    httplib_response = conn.getresponse()
                except BaseException as e:
                    # Remove the TypeError from the exception chain in
                    # Python 3 (including for exceptions like SystemExit).
                    # Otherwise it looks like a bug in the code.
                    six.raise_from(e, None)
        except (SocketTimeout, BaseSSLError, SocketError) as e:
            self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
            raise

        # AppEngine doesn't have a version attr.
        http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
        log.debug(
            '%s://%s:%s "%s %s %s" %s %s',
            self.scheme,
            self.host,
            self.port,
            method,
            url,
            http_version,
            httplib_response.status,
            httplib_response.length,
        )

        # Header parsing failures are logged (not raised): the response is
        # still usable even if some headers could not be parsed.
        try:
            assert_header_parsing(httplib_response.msg)
        except (HeaderParsingError, TypeError) as hpe:  # Platform-specific: Python 3
            log.warning(
                "Failed to parse headers (url=%s): %s",
                self._absolute_url(url),
                hpe,
                exc_info=True,
            )

        return httplib_response

498 

499 def _absolute_url(self, path): 

500 return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url 

501 

502 def close(self): 

503 """ 

504 Close all pooled connections and disable the pool. 

505 """ 

506 if self.pool is None: 

507 return 

508 # Disable access to the pool 

509 old_pool, self.pool = self.pool, None 

510 

511 # Close all the HTTPConnections in the pool. 

512 _close_pool_connections(old_pool) 

513 

    def is_same_host(self, url):
        """
        Check if the given ``url`` is a member of the same host as this
        connection pool.
        """
        # Relative URLs are always on the same host.
        if url.startswith("/"):
            return True

        # TODO: Add optional support for socket.gethostbyname checking.
        scheme, host, port = get_host(url)
        if host is not None:
            host = _normalize_host(host, scheme=scheme)

        # Use explicit default port for comparison when none is given
        if self.port and not port:
            port = port_by_scheme.get(scheme)
        elif not self.port and port == port_by_scheme.get(scheme):
            port = None

        return (scheme, host, port) == (self.scheme, self.host, self.port)

534 

    def urlopen(
        self,
        method,
        url,
        body=None,
        headers=None,
        retries=None,
        redirect=True,
        assert_same_host=True,
        timeout=_Default,
        pool_timeout=None,
        release_conn=None,
        chunked=False,
        body_pos=None,
        **response_kw
    ):
        """
        Get a connection from the pool and perform an HTTP request. This is the
        lowest level call for making a request, so you'll need to specify all
        the raw details.

        .. note::

           More commonly, it's appropriate to use a convenience method provided
           by :class:`.RequestMethods`, such as :meth:`request`.

        .. note::

           `release_conn` will only behave as expected if
           `preload_content=False` because we want to make
           `preload_content=False` the default behaviour someday soon without
           breaking backwards compatibility.

        :param method:
            HTTP request method (such as GET, POST, PUT, etc.)

        :param url:
            The URL to perform the request on.

        :param body:
            Data to send in the request body, either :class:`str`, :class:`bytes`,
            an iterable of :class:`str`/:class:`bytes`, or a file-like object.

        :param headers:
            Dictionary of custom headers to send, such as User-Agent,
            If-None-Match, etc. If None, pool headers are used. If provided,
            these headers completely replace any pool-specific headers.

        :param retries:
            Configure the number of retries to allow before raising a
            :class:`~urllib3.exceptions.MaxRetryError` exception.

            Pass ``None`` to retry until you receive a response. Pass a
            :class:`~urllib3.util.retry.Retry` object for fine-grained control
            over different types of retries.
            Pass an integer number to retry connection errors that many times,
            but no other types of errors. Pass zero to never retry.

            If ``False``, then retries are disabled and any exception is raised
            immediately. Also, instead of raising a MaxRetryError on redirects,
            the redirect response will be returned.

        :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.

        :param redirect:
            If True, automatically handle redirects (status codes 301, 302,
            303, 307, 308). Each redirect counts as a retry. Disabling retries
            will disable redirect, too.

        :param assert_same_host:
            If ``True``, will make sure that the host of the pool requests is
            consistent else will raise HostChangedError. When ``False``, you can
            use the pool on an HTTP proxy and request foreign hosts.

        :param timeout:
            If specified, overrides the default timeout for this one
            request. It may be a float (in seconds) or an instance of
            :class:`urllib3.util.Timeout`.

        :param pool_timeout:
            If set and the pool is set to block=True, then this method will
            block for ``pool_timeout`` seconds and raise EmptyPoolError if no
            connection is available within the time period.

        :param release_conn:
            If False, then the urlopen call will not release the connection
            back into the pool once a response is received (but will release if
            you read the entire contents of the response such as when
            `preload_content=True`). This is useful if you're not preloading
            the response's content immediately. You will need to call
            ``r.release_conn()`` on the response ``r`` to return the connection
            back into the pool. If None, it takes the value of
            ``response_kw.get('preload_content', True)``.

        :param chunked:
            If True, urllib3 will send the body using chunked transfer
            encoding. Otherwise, urllib3 will send the body using the standard
            content-length form. Defaults to False.

        :param int body_pos:
            Position to seek to in file-like body in the event of a retry or
            redirect. Typically this won't need to be set because urllib3 will
            auto-populate the value when needed.

        :param \\**response_kw:
            Additional parameters are passed to
            :meth:`urllib3.response.HTTPResponse.from_httplib`
        """

        parsed_url = parse_url(url)
        destination_scheme = parsed_url.scheme

        if headers is None:
            headers = self.headers

        if not isinstance(retries, Retry):
            retries = Retry.from_int(retries, redirect=redirect, default=self.retries)

        if release_conn is None:
            release_conn = response_kw.get("preload_content", True)

        # Check host
        if assert_same_host and not self.is_same_host(url):
            raise HostChangedError(self, url, retries)

        # Ensure that the URL we're connecting to is properly encoded
        if url.startswith("/"):
            url = six.ensure_str(_encode_target(url))
        else:
            url = six.ensure_str(parsed_url.url)

        conn = None

        # Track whether `conn` needs to be released before
        # returning/raising/recursing. Update this variable if necessary, and
        # leave `release_conn` constant throughout the function. That way, if
        # the function recurses, the original value of `release_conn` will be
        # passed down into the recursive call, and its value will be respected.
        #
        # See issue #651 [1] for details.
        #
        # [1] <https://github.com/urllib3/urllib3/issues/651>
        release_this_conn = release_conn

        http_tunnel_required = connection_requires_http_tunnel(
            self.proxy, self.proxy_config, destination_scheme
        )

        # Merge the proxy headers. Only done when not using HTTP CONNECT. We
        # have to copy the headers dict so we can safely change it without those
        # changes being reflected in anyone else's copy.
        if not http_tunnel_required:
            headers = headers.copy()
            headers.update(self.proxy_headers)

        # Must keep the exception bound to a separate variable or else Python 3
        # complains about UnboundLocalError.
        err = None

        # Keep track of whether we cleanly exited the except block. This
        # ensures we do proper cleanup in finally.
        clean_exit = False

        # Rewind body position, if needed. Record current position
        # for future rewinds in the event of a redirect/retry.
        body_pos = set_file_position(body, body_pos)

        try:
            # Request a connection from the queue.
            timeout_obj = self._get_timeout(timeout)
            conn = self._get_conn(timeout=pool_timeout)

            conn.timeout = timeout_obj.connect_timeout

            is_new_proxy_conn = self.proxy is not None and not getattr(
                conn, "sock", None
            )
            if is_new_proxy_conn and http_tunnel_required:
                self._prepare_proxy(conn)

            # Make the request on the httplib connection object.
            httplib_response = self._make_request(
                conn,
                method,
                url,
                timeout=timeout_obj,
                body=body,
                headers=headers,
                chunked=chunked,
            )

            # If we're going to release the connection in ``finally:``, then
            # the response doesn't need to know about the connection. Otherwise
            # it will also try to release it and we'll have a double-release
            # mess.
            response_conn = conn if not release_conn else None

            # Pass method to Response for length checking
            response_kw["request_method"] = method

            # Import httplib's response into our own wrapper object
            response = self.ResponseCls.from_httplib(
                httplib_response,
                pool=self,
                connection=response_conn,
                retries=retries,
                **response_kw
            )

            # Everything went great!
            clean_exit = True

        except EmptyPoolError:
            # Didn't get a connection from the pool, no need to clean up
            clean_exit = True
            release_this_conn = False
            raise

        except (
            TimeoutError,
            HTTPException,
            SocketError,
            ProtocolError,
            BaseSSLError,
            SSLError,
            CertificateError,
        ) as e:
            # Discard the connection for these exceptions. It will be
            # replaced during the next _get_conn() call.
            clean_exit = False

            def _is_ssl_error_message_from_http_proxy(ssl_error):
                # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
                # SSLErrors are kinda all over the place when it comes to the message,
                # so we try to cover our bases here!
                message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
                return (
                    "wrong version number" in message
                    or "unknown protocol" in message
                    or "record layer failure" in message
                )

            # Try to detect a common user error with proxies which is to
            # set an HTTP proxy to be HTTPS when it should be 'http://'
            # (ie {'http': 'http://proxy', 'https': 'https://proxy'})
            # Instead we add a nice error message and point to a URL.
            if (
                isinstance(e, BaseSSLError)
                and self.proxy
                and _is_ssl_error_message_from_http_proxy(e)
                and conn.proxy
                and conn.proxy.scheme == "https"
            ):
                e = ProxyError(
                    "Your proxy appears to only use HTTP and not HTTPS, "
                    "try changing your proxy URL to be HTTP. See: "
                    "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
                    "#https-proxy-error-http-proxy",
                    SSLError(e),
                )
            elif isinstance(e, (BaseSSLError, CertificateError)):
                e = SSLError(e)
            elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
                e = ProxyError("Cannot connect to proxy.", e)
            elif isinstance(e, (SocketError, HTTPException)):
                e = ProtocolError("Connection aborted.", e)

            # increment() re-raises (as MaxRetryError) when retries are
            # exhausted or disabled; otherwise we fall through and retry below.
            retries = retries.increment(
                method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
            )
            retries.sleep()

            # Keep track of the error for the retry warning.
            err = e

        finally:
            if not clean_exit:
                # We hit some kind of exception, handled or otherwise. We need
                # to throw the connection away unless explicitly told not to.
                # Close the connection, set the variable to None, and make sure
                # we put the None back in the pool to avoid leaking it.
                conn = conn and conn.close()
                release_this_conn = True

            if release_this_conn:
                # Put the connection back to be reused. If the connection is
                # expired then it will be None, which will get replaced with a
                # fresh connection during _get_conn.
                self._put_conn(conn)

        if not conn:
            # Try again
            log.warning(
                "Retrying (%r) after connection broken by '%r': %s", retries, err, url
            )
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries,
                redirect,
                assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Handle redirect?
        redirect_location = redirect and response.get_redirect_location()
        if redirect_location:
            if response.status == 303:
                # Change the method according to RFC 9110, Section 15.4.4.
                method = "GET"
                # And lose the body not to transfer anything sensitive.
                body = None
                headers = HTTPHeaderDict(headers)._prepare_for_method_change()

            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_redirect:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep_for_retry(response)
            log.debug("Redirecting %s -> %s", url, redirect_location)
            return self.urlopen(
                method,
                redirect_location,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        # Check if we should retry the HTTP response.
        has_retry_after = bool(response.headers.get("Retry-After"))
        if retries.is_retry(method, response.status, has_retry_after):
            try:
                retries = retries.increment(method, url, response=response, _pool=self)
            except MaxRetryError:
                if retries.raise_on_status:
                    response.drain_conn()
                    raise
                return response

            response.drain_conn()
            retries.sleep(response)
            log.debug("Retry: %s", url)
            return self.urlopen(
                method,
                url,
                body,
                headers,
                retries=retries,
                redirect=redirect,
                assert_same_host=assert_same_host,
                timeout=timeout,
                pool_timeout=pool_timeout,
                release_conn=release_conn,
                chunked=chunked,
                body_pos=body_pos,
                **response_kw
            )

        return response

914 

915 

class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
    ``assert_hostname`` and ``host`` in this order to verify connections.
    If ``assert_hostname`` is False, no verification is done.

    The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
    ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
    is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
    the connection socket into an SSL socket.
    """

    scheme = "https"
    ConnectionCls = HTTPSConnection

932 

    def __init__(
        self,
        host,
        port=None,
        strict=False,
        timeout=Timeout.DEFAULT_TIMEOUT,
        maxsize=1,
        block=False,
        headers=None,
        retries=None,
        _proxy=None,
        _proxy_headers=None,
        key_file=None,
        cert_file=None,
        cert_reqs=None,
        key_password=None,
        ca_certs=None,
        ssl_version=None,
        assert_hostname=None,
        assert_fingerprint=None,
        ca_cert_dir=None,
        **conn_kw
    ):
        """Create the pool; TLS parameters are documented on the class."""

        HTTPConnectionPool.__init__(
            self,
            host,
            port,
            strict,
            timeout,
            maxsize,
            block,
            headers,
            retries,
            _proxy,
            _proxy_headers,
            **conn_kw
        )

        # TLS configuration is stored on the pool and applied to each new
        # connection in _prepare_conn().
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.key_password = key_password
        self.ca_certs = ca_certs
        self.ca_cert_dir = ca_cert_dir
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

981 

982 def _prepare_conn(self, conn): 

983 """ 

984 Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket` 

985 and establish the tunnel if proxy is used. 

986 """ 

987 

988 if isinstance(conn, VerifiedHTTPSConnection): 

989 conn.set_cert( 

990 key_file=self.key_file, 

991 key_password=self.key_password, 

992 cert_file=self.cert_file, 

993 cert_reqs=self.cert_reqs, 

994 ca_certs=self.ca_certs, 

995 ca_cert_dir=self.ca_cert_dir, 

996 assert_hostname=self.assert_hostname, 

997 assert_fingerprint=self.assert_fingerprint, 

998 ) 

999 conn.ssl_version = self.ssl_version 

1000 return conn 

1001 

1002 def _prepare_proxy(self, conn): 

1003 """ 

1004 Establishes a tunnel connection through HTTP CONNECT. 

1005 

1006 Tunnel connection is established early because otherwise httplib would 

1007 improperly set Host: header to proxy's IP:port. 

1008 """ 

1009 

1010 conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers) 

1011 

1012 if self.proxy.scheme == "https": 

1013 conn.tls_in_tls_required = True 

1014 

1015 conn.connect() 

1016 

1017 def _new_conn(self): 

1018 """ 

1019 Return a fresh :class:`http.client.HTTPSConnection`. 

1020 """ 

1021 self.num_connections += 1 

1022 log.debug( 

1023 "Starting new HTTPS connection (%d): %s:%s", 

1024 self.num_connections, 

1025 self.host, 

1026 self.port or "443", 

1027 ) 

1028 

1029 if not self.ConnectionCls or self.ConnectionCls is DummyConnection: 

1030 raise SSLError( 

1031 "Can't connect to HTTPS URL because the SSL module is not available." 

1032 ) 

1033 

1034 actual_host = self.host 

1035 actual_port = self.port 

1036 if self.proxy is not None: 

1037 actual_host = self.proxy.host 

1038 actual_port = self.proxy.port 

1039 

1040 conn = self.ConnectionCls( 

1041 host=actual_host, 

1042 port=actual_port, 

1043 timeout=self.timeout.connect_timeout, 

1044 strict=self.strict, 

1045 cert_file=self.cert_file, 

1046 key_file=self.key_file, 

1047 key_password=self.key_password, 

1048 **self.conn_kw 

1049 ) 

1050 

1051 return self._prepare_conn(conn) 

1052 

1053 def _validate_conn(self, conn): 

1054 """ 

1055 Called right before a request is made, after the socket is created. 

1056 """ 

1057 super(HTTPSConnectionPool, self)._validate_conn(conn) 

1058 

1059 # Force connect early to allow us to validate the connection. 

1060 if not getattr(conn, "sock", None): # AppEngine might not have `.sock` 

1061 conn.connect() 

1062 

1063 if not conn.is_verified: 

1064 warnings.warn( 

1065 ( 

1066 "Unverified HTTPS request is being made to host '%s'. " 

1067 "Adding certificate verification is strongly advised. See: " 

1068 "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" 

1069 "#ssl-warnings" % conn.host 

1070 ), 

1071 InsecureRequestWarning, 

1072 ) 

1073 

1074 if getattr(conn, "proxy_is_verified", None) is False: 

1075 warnings.warn( 

1076 ( 

1077 "Unverified HTTPS connection done to an HTTPS proxy. " 

1078 "Adding certificate verification is strongly advised. See: " 

1079 "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html" 

1080 "#ssl-warnings" 

1081 ), 

1082 InsecureRequestWarning, 

1083 ) 

1084 

1085 

def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)

    # Fall back to the scheme's default port (80 if the scheme is unknown).
    if not port:
        port = port_by_scheme.get(scheme, 80)

    pool_cls = HTTPSConnectionPool if scheme == "https" else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)

1112 

1113 

def _normalize_host(host, scheme):
    """
    Normalize hosts for comparisons and use with sockets.
    """
    normalized = normalize_host(host, scheme)

    # httplib doesn't like it when we include brackets in IPv6 addresses
    # Specifically, if we include brackets but also pass the port then
    # httplib crazily doubles up the square brackets on the Host header.
    # Instead, we need to make sure we never pass ``None`` as the port.
    # However, for backward compatibility reasons we can't actually
    # *assert* that.  See http://bugs.python.org/issue28539
    if normalized[:1] == "[" and normalized[-1:] == "]":
        normalized = normalized[1:-1]
    return normalized

1130 

1131 

1132def _close_pool_connections(pool): 

1133 """Drains a queue of connections and closes each one.""" 

1134 try: 

1135 while True: 

1136 conn = pool.get(block=False) 

1137 if conn: 

1138 conn.close() 

1139 except queue.Empty: 

1140 pass # Done.