Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/h11/_connection.py: 65%

217 statements  

# This contains the main Connection class. Everything in h11 revolves around
# this.
from typing import (
    Any,
    Callable,
    cast,
    Dict,
    List,
    Optional,
    overload,
    Tuple,
    Type,
    Union,
)

from ._events import (
    ConnectionClosed,
    Data,
    EndOfMessage,
    Event,
    InformationalResponse,
    Request,
    Response,
)
from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
from ._readers import READERS, ReadersType
from ._receivebuffer import ReceiveBuffer
from ._state import (
    _SWITCH_CONNECT,
    _SWITCH_UPGRADE,
    CLIENT,
    ConnectionState,
    DONE,
    ERROR,
    MIGHT_SWITCH_PROTOCOL,
    SEND_BODY,
    SERVER,
    SWITCHED_PROTOCOL,
)
from ._util import (  # Import the internal things we need
    LocalProtocolError,
    RemoteProtocolError,
    Sentinel,
)
from ._writers import WRITERS, WritersType

# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = ["Connection", "NEED_DATA", "PAUSED"]


class NEED_DATA(Sentinel, metaclass=Sentinel):
    pass


class PAUSED(Sentinel, metaclass=Sentinel):
    pass


# If we ever have this much buffered without it making a complete parseable
# event, we error out. The only time we really buffer is when reading the
# request/response line + headers together, so this is effectively the limit on
# the size of that.
#
# Some precedents for defaults:
# - node.js: 80 * 1024
# - tomcat: 8 * 1024
# - IIS: 16 * 1024
# - Apache: <8 KiB per line>
DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
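# (Illustrative note, not part of h11: the limit can be adjusted per connection
# through the Connection constructor defined below, e.g.
#
#     conn = Connection(CLIENT, max_incomplete_event_size=64 * 1024)
#
# after which next_event() tolerates up to 64 KiB of buffered, incomplete data
# before raising RemoteProtocolError.)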


# RFC 7230's rules for connection lifecycles:
# - If either side says they want to close the connection, then the connection
#   must close.
# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
#   (and even this is a mess -- e.g. if you're implementing a proxy then
#   sending Connection: keep-alive is forbidden).
#
# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
# our rule is:
# - If someone says Connection: close, we will close
# - If someone uses HTTP/1.0, we will close.
def _keep_alive(event: Union[Request, Response]) -> bool:
    connection = get_comma_header(event.headers, b"connection")
    if b"close" in connection:
        return False
    if getattr(event, "http_version", b"1.1") < b"1.1":
        return False
    return True
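
# For example (an illustrative sketch, not part of h11):
#
#     _keep_alive(Request(method="GET", target="/", headers=[("Host", "a")]))
#     # -> True: HTTP/1.1 with no Connection: close header
#
#     _keep_alive(Response(status_code=200, headers=[("Connection", "close")]))
#     # -> False: explicit close
#
# and any Request/Response carrying http_version=b"1.0" also returns False.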


def _body_framing(
    request_method: bytes, event: Union[Request, Response]
) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
    # Called when we enter SEND_BODY to figure out framing information for
    # this body.
    #
    # These are the only two events that can trigger a SEND_BODY state:
    assert type(event) in (Request, Response)
    # Returns one of:
    #
    #    ("content-length", count)
    #    ("chunked", ())
    #    ("http/1.0", ())
    #
    # which are (lookup key, *args) for constructing body reader/writer
    # objects.
    #
    # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
    #
    # Step 1: some responses always have an empty body, regardless of what the
    # headers say.
    if type(event) is Response:
        if (
            event.status_code in (204, 304)
            or request_method == b"HEAD"
            or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
        ):
            return ("content-length", (0,))
        # Section 3.3.3 also lists another case -- responses with status_code
        # < 200. For us these are InformationalResponses, not Responses, so
        # they can't get into this function in the first place.
        assert event.status_code >= 200

    # Step 2: check for Transfer-Encoding (T-E beats C-L):
    transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
    if transfer_encodings:
        assert transfer_encodings == [b"chunked"]
        return ("chunked", ())

    # Step 3: check for Content-Length
    content_lengths = get_comma_header(event.headers, b"content-length")
    if content_lengths:
        return ("content-length", (int(content_lengths[0]),))

    # Step 4: no applicable headers; fallback/default depends on type
    if type(event) is Request:
        return ("content-length", (0,))
    else:
        return ("http/1.0", ())
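
# For example (an illustrative sketch, not part of h11):
#
#     _body_framing(b"GET", Response(status_code=200,
#                                    headers=[("Content-Length", "100")]))
#     # -> ("content-length", (100,))
#
#     _body_framing(b"HEAD", Response(status_code=200,
#                                     headers=[("Content-Length", "100")]))
#     # -> ("content-length", (0,))  -- HEAD responses never carry a body
#
#     _body_framing(b"GET", Response(status_code=200, headers=[]))
#     # -> ("http/1.0", ())  -- no framing headers at all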


################################################################
#
# The main Connection class
#
################################################################


class Connection:
    """An object encapsulating the state of an HTTP connection.

    Args:
        our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
            you're implementing a server, pass :data:`h11.SERVER`.

        max_incomplete_event_size (int):
            The maximum number of bytes we're willing to buffer of an
            incomplete event. In practice this mostly sets a limit on the
            maximum size of the request/response line + headers. If this is
            exceeded, then :meth:`next_event` will raise
            :exc:`RemoteProtocolError`.

    """

    def __init__(
        self,
        our_role: Type[Sentinel],
        max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
    ) -> None:
        self._max_incomplete_event_size = max_incomplete_event_size
        # State and role tracking
        if our_role not in (CLIENT, SERVER):
            raise ValueError(f"expected CLIENT or SERVER, not {our_role!r}")
        self.our_role = our_role
        self.their_role: Type[Sentinel]
        if our_role is CLIENT:
            self.their_role = SERVER
        else:
            self.their_role = CLIENT
        self._cstate = ConnectionState()

        # Callables for converting data->events or vice-versa given the
        # current state
        self._writer = self._get_io_object(self.our_role, None, WRITERS)
        self._reader = self._get_io_object(self.their_role, None, READERS)

        # Holds any unprocessed received data
        self._receive_buffer = ReceiveBuffer()
        # If this is true, then it indicates that the incoming connection was
        # closed *after* the end of whatever's in self._receive_buffer:
        self._receive_buffer_closed = False

        # Extra bits of state that don't fit into the state machine.
        #
        # These two are only used to interpret framing headers for figuring
        # out how to read/write response bodies. their_http_version is also
        # made available as a convenient public API.
        self.their_http_version: Optional[bytes] = None
        self._request_method: Optional[bytes] = None
        # This is pure flow-control and doesn't at all affect the set of legal
        # transitions, so no need to bother ConnectionState with it:
        self.client_is_waiting_for_100_continue = False

    @property
    def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
        """A dictionary like::

           {CLIENT: <client state>, SERVER: <server state>}

        See :ref:`state-machine` for details.

        """
        return dict(self._cstate.states)

    @property
    def our_state(self) -> Type[Sentinel]:
        """The current state of whichever role we are playing. See
        :ref:`state-machine` for details.
        """
        return self._cstate.states[self.our_role]

    @property
    def their_state(self) -> Type[Sentinel]:
        """The current state of whichever role we are NOT playing. See
        :ref:`state-machine` for details.
        """
        return self._cstate.states[self.their_role]

    @property
    def they_are_waiting_for_100_continue(self) -> bool:
        return self.their_role is CLIENT and self.client_is_waiting_for_100_continue

    def start_next_cycle(self) -> None:
        """Attempt to reset our connection state for a new request/response
        cycle.

        If both client and server are in :data:`DONE` state, then resets them
        both to :data:`IDLE` state in preparation for a new request/response
        cycle on this same connection. Otherwise, raises a
        :exc:`LocalProtocolError`.

        See :ref:`keepalive-and-pipelining`.

        """
        old_states = dict(self._cstate.states)
        self._cstate.start_next_cycle()
        self._request_method = None
        # self.their_http_version gets left alone, since it presumably lasts
        # beyond a single request/response cycle
        assert not self.client_is_waiting_for_100_continue
        self._respond_to_state_changes(old_states)

    def _process_error(self, role: Type[Sentinel]) -> None:
        old_states = dict(self._cstate.states)
        self._cstate.process_error(role)
        self._respond_to_state_changes(old_states)

    def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
        if type(event) is InformationalResponse and event.status_code == 101:
            return _SWITCH_UPGRADE
        if type(event) is Response:
            if (
                _SWITCH_CONNECT in self._cstate.pending_switch_proposals
                and 200 <= event.status_code < 300
            ):
                return _SWITCH_CONNECT
        return None

    # All events go through here
    def _process_event(self, role: Type[Sentinel], event: Event) -> None:
        # First, pass the event through the state machine to make sure it
        # succeeds.
        old_states = dict(self._cstate.states)
        if role is CLIENT and type(event) is Request:
            if event.method == b"CONNECT":
                self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
            if get_comma_header(event.headers, b"upgrade"):
                self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
        server_switch_event = None
        if role is SERVER:
            server_switch_event = self._server_switch_event(event)
        self._cstate.process_event(role, type(event), server_switch_event)

        # Then perform the updates triggered by it.

        if type(event) is Request:
            self._request_method = event.method

        if role is self.their_role and type(event) in (
            Request,
            Response,
            InformationalResponse,
        ):
            event = cast(Union[Request, Response, InformationalResponse], event)
            self.their_http_version = event.http_version

        # Keep alive handling
        #
        # RFC 7230 doesn't really say what one should do if Connection: close
        # shows up on a 1xx InformationalResponse. I think the idea is that
        # this is not supposed to happen. In any case, if it does happen, we
        # ignore it.
        if type(event) in (Request, Response) and not _keep_alive(
            cast(Union[Request, Response], event)
        ):
            self._cstate.process_keep_alive_disabled()

        # 100-continue
        if type(event) is Request and has_expect_100_continue(event):
            self.client_is_waiting_for_100_continue = True
        if type(event) in (InformationalResponse, Response):
            self.client_is_waiting_for_100_continue = False
        if role is CLIENT and type(event) in (Data, EndOfMessage):
            self.client_is_waiting_for_100_continue = False

        self._respond_to_state_changes(old_states, event)

    def _get_io_object(
        self,
        role: Type[Sentinel],
        event: Optional[Event],
        io_dict: Union[ReadersType, WritersType],
    ) -> Optional[Callable[..., Any]]:
        # event may be None; it's only used when entering SEND_BODY
        state = self._cstate.states[role]
        if state is SEND_BODY:
            # Special case: the io_dict has a dict of reader/writer factories
            # that depend on the request/response framing.
            framing_type, args = _body_framing(
                cast(bytes, self._request_method), cast(Union[Request, Response], event)
            )
            return io_dict[SEND_BODY][framing_type](*args)  # type: ignore[index]
        else:
            # General case: the io_dict just has the appropriate reader/writer
            # for this state
            return io_dict.get((role, state))  # type: ignore[return-value]

    # This must be called after any action that might have caused
    # self._cstate.states to change.
    def _respond_to_state_changes(
        self,
        old_states: Dict[Type[Sentinel], Type[Sentinel]],
        event: Optional[Event] = None,
    ) -> None:
        # Update reader/writer
        if self.our_state != old_states[self.our_role]:
            self._writer = self._get_io_object(self.our_role, event, WRITERS)
        if self.their_state != old_states[self.their_role]:
            self._reader = self._get_io_object(self.their_role, event, READERS)

    @property
    def trailing_data(self) -> Tuple[bytes, bool]:
        """Data that has been received, but not yet processed, represented as
        a tuple with two elements, where the first is a byte-string containing
        the unprocessed data itself, and the second is a bool that is True if
        the receive connection was closed.

        See :ref:`switching-protocols` for discussion of why you'd want this.
        """
        return (bytes(self._receive_buffer), self._receive_buffer_closed)

    def receive_data(self, data: bytes) -> None:
        """Add data to our internal receive buffer.

        This does not actually do any processing on the data, just stores
        it. To trigger processing, you have to call :meth:`next_event`.

        Args:
            data (:term:`bytes-like object`):
                The new data that was just received.

                Special case: If *data* is an empty byte-string like ``b""``,
                then this indicates that the remote side has closed the
                connection (end of file). Normally this is convenient, because
                standard Python APIs like :meth:`file.read` or
                :meth:`socket.recv` use ``b""`` to indicate end-of-file, while
                other failures to read are indicated using other mechanisms
                like raising :exc:`TimeoutError`. When using such an API you
                can just blindly pass through whatever you get from ``read``
                to :meth:`receive_data`, and everything will work.

                But, if you have an API where reading an empty string is a
                valid non-EOF condition, then you need to be aware of this and
                make sure to check for such strings and avoid passing them to
                :meth:`receive_data`.

        Returns:
            Nothing, but after calling this you should call :meth:`next_event`
            to parse the newly received data.

        Raises:
            RuntimeError:
                Raised if you pass an empty *data*, indicating EOF, and then
                pass a non-empty *data*, indicating more data that somehow
                arrived after the EOF.

                (Calling ``receive_data(b"")`` multiple times is fine,
                and equivalent to calling it once.)

        """
        if data:
            if self._receive_buffer_closed:
                raise RuntimeError("received close, then received more data?")
            self._receive_buffer += data
        else:
            self._receive_buffer_closed = True
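
    # For example (an illustrative sketch, not part of h11), a typical read
    # loop feeds whatever the socket returns straight in, where `sock` is a
    # connected socket supplied by the caller:
    #
    #     data = sock.recv(65536)      # returns b"" on EOF
    #     conn.receive_data(data)      # b"" tells h11 the peer closed
    #     event = conn.next_event()    # then pull out parsed events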

    def _extract_next_receive_event(
        self,
    ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
        state = self.their_state
        # We don't pause immediately when they enter DONE, because even in
        # DONE state we can still process a ConnectionClosed() event. But
        # if we have data in our buffer, then we definitely aren't getting
        # a ConnectionClosed() immediately and we need to pause.
        if state is DONE and self._receive_buffer:
            return PAUSED
        if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
            return PAUSED
        assert self._reader is not None
        event = self._reader(self._receive_buffer)
        if event is None:
            if not self._receive_buffer and self._receive_buffer_closed:
                # In some unusual cases (basically just HTTP/1.0 bodies), EOF
                # triggers an actual protocol event; in that case, we want to
                # return that event, and then the state will change and we'll
                # get called again to generate the actual ConnectionClosed().
                if hasattr(self._reader, "read_eof"):
                    event = self._reader.read_eof()
                else:
                    event = ConnectionClosed()
        if event is None:
            event = NEED_DATA
        return event  # type: ignore[no-any-return]

    def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
        """Parse the next event out of our receive buffer, update our internal
        state, and return it.

        This is a mutating operation -- think of it like calling :func:`next`
        on an iterator.

        Returns:
            : One of three things:

            1) An event object -- see :ref:`events`.

            2) The special constant :data:`NEED_DATA`, which indicates that
               you need to read more data from your socket and pass it to
               :meth:`receive_data` before this method will be able to return
               any more events.

            3) The special constant :data:`PAUSED`, which indicates that we
               are not in a state where we can process incoming data (usually
               because the peer has finished their part of the current
               request/response cycle, and you have not yet called
               :meth:`start_next_cycle`). See :ref:`flow-control` for details.

        Raises:
            RemoteProtocolError:
                The peer has misbehaved. You should close the connection
                (possibly after sending some kind of 4xx response).

        Once this method returns :class:`ConnectionClosed` once, then all
        subsequent calls will also return :class:`ConnectionClosed`.

        If this method raises any exception besides :exc:`RemoteProtocolError`
        then that's a bug -- if it happens please file a bug report!

        If this method raises any exception then it also sets
        :attr:`Connection.their_state` to :data:`ERROR` -- see
        :ref:`error-handling` for discussion.

        """

        if self.their_state is ERROR:
            raise RemoteProtocolError("Can't receive data when peer state is ERROR")
        try:
            event = self._extract_next_receive_event()
            if event not in [NEED_DATA, PAUSED]:
                self._process_event(self.their_role, cast(Event, event))
            if event is NEED_DATA:
                if len(self._receive_buffer) > self._max_incomplete_event_size:
                    # 431 is "Request header fields too large" which is pretty
                    # much the only situation where we can get here
                    raise RemoteProtocolError(
                        "Receive buffer too long", error_status_hint=431
                    )
                if self._receive_buffer_closed:
                    # We're still trying to complete some event, but that's
                    # never going to happen because no more data is coming
                    raise RemoteProtocolError("peer unexpectedly closed connection")
            return event
        except BaseException as exc:
            self._process_error(self.their_role)
            if isinstance(exc, LocalProtocolError):
                exc._reraise_as_remote_protocol_error()
            else:
                raise

    @overload
    def send(self, event: ConnectionClosed) -> None:
        ...

    @overload
    def send(
        self, event: Union[Request, InformationalResponse, Response, Data, EndOfMessage]
    ) -> bytes:
        ...

    @overload
    def send(self, event: Event) -> Optional[bytes]:
        ...

    def send(self, event: Event) -> Optional[bytes]:
        """Convert a high-level event into bytes that can be sent to the peer,
        while updating our internal state machine.

        Args:
            event: The :ref:`event <events>` to send.

        Returns:
            If ``type(event) is ConnectionClosed``, then returns
            ``None``. Otherwise, returns a :term:`bytes-like object`.

        Raises:
            LocalProtocolError:
                Sending this event at this time would violate our
                understanding of the HTTP/1.1 protocol.

        If this method raises any exception then it also sets
        :attr:`Connection.our_state` to :data:`ERROR` -- see
        :ref:`error-handling` for discussion.

        """
        data_list = self.send_with_data_passthrough(event)
        if data_list is None:
            return None
        else:
            return b"".join(data_list)

    def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:
        """Identical to :meth:`send`, except that in situations where
        :meth:`send` returns a single :term:`bytes-like object`, this instead
        returns a list of them -- and when sending a :class:`Data` event, this
        list is guaranteed to contain the exact object you passed in as
        :attr:`Data.data`. See :ref:`sendfile` for discussion.

        """
        if self.our_state is ERROR:
            raise LocalProtocolError("Can't send data when our state is ERROR")
        try:
            if type(event) is Response:
                event = self._clean_up_response_headers_for_sending(event)
            # We want to call _process_event before calling the writer,
            # because if someone tries to do something invalid then this will
            # give a sensible error message, while our writers all just assume
            # they will only receive valid events. But, _process_event might
            # change self._writer. So we have to do a little dance:
            writer = self._writer
            self._process_event(self.our_role, event)
            if type(event) is ConnectionClosed:
                return None
            else:
                # In any situation where writer is None, process_event should
                # have raised ProtocolError
                assert writer is not None
                data_list: List[bytes] = []
                writer(event, data_list.append)
                return data_list
        except:
            self._process_error(self.our_role)
            raise
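
    # For example (an illustrative sketch, not part of h11), a sendfile-style
    # caller can rely on the passthrough guarantee to avoid copying a large
    # buffer, where `sock` and `big_buffer` are supplied by the caller:
    #
    #     for chunk in conn.send_with_data_passthrough(Data(data=big_buffer)):
    #         sock.sendall(chunk)   # one of these chunks is `big_buffer` itself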

    def send_failed(self) -> None:
        """Notify the state machine that we failed to send the data it gave
        us.

        This causes :attr:`Connection.our_state` to immediately become
        :data:`ERROR` -- see :ref:`error-handling` for discussion.

        """
        self._process_error(self.our_role)

    # When sending a Response, we take responsibility for a few things:
    #
    # - Sometimes you MUST set Connection: close. We take care of those
    #   times. (You can also set it yourself if you want, and if you do then
    #   we'll respect that and close the connection at the right time. But you
    #   don't have to worry about that unless you want to.)
    #
    # - The user has to set Content-Length if they want it. Otherwise, for
    #   responses that have bodies (e.g. not HEAD), we will automatically
    #   select the right mechanism for streaming a body of unknown length,
    #   which depends on the peer's HTTP version.
    #
    # This function's *only* responsibility is making sure headers are set up
    # right -- everything downstream just looks at the headers. There are no
    # side channels.
    def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
        assert type(response) is Response

        headers = response.headers
        need_close = False

        # HEAD requests need some special handling: they always act like they
        # have Content-Length: 0, and that's how _body_framing treats
        # them. But their headers are supposed to match what we would send if
        # the request was a GET. (Technically there is one deviation allowed:
        # we're allowed to leave out the framing headers -- see
        # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
        # easy to get them right.)
        method_for_choosing_headers = cast(bytes, self._request_method)
        if method_for_choosing_headers == b"HEAD":
            method_for_choosing_headers = b"GET"
        framing_type, _ = _body_framing(method_for_choosing_headers, response)
        if framing_type in ("chunked", "http/1.0"):
            # This response has a body of unknown length.
            # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
            # If our peer is HTTP/1.0, we use no framing headers, and close the
            # connection afterwards.
            #
            # Make sure to clear Content-Length (in principle user could have
            # set both and then we ignored Content-Length b/c
            # Transfer-Encoding overwrote it -- this would be naughty of them,
            # but the HTTP spec says that if our peer does this then we have
            # to fix it instead of erroring out, so we'll accord the user the
            # same respect).
            headers = set_comma_header(headers, b"content-length", [])
            if self.their_http_version is None or self.their_http_version < b"1.1":
                # Either we never got a valid request and are sending back an
                # error (their_http_version is None), so we assume the worst;
                # or else we did get a valid HTTP/1.0 request, so we know that
                # they don't understand chunked encoding.
                headers = set_comma_header(headers, b"transfer-encoding", [])
                # This is actually redundant ATM, since currently we
                # unconditionally disable keep-alive when talking to HTTP/1.0
                # peers. But let's be defensive just in case we add
                # Connection: keep-alive support later:
                if self._request_method != b"HEAD":
                    need_close = True
            else:
                headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])

        if not self._cstate.keep_alive or need_close:
            # Make sure Connection: close is set
            connection = set(get_comma_header(headers, b"connection"))
            connection.discard(b"keep-alive")
            connection.add(b"close")
            headers = set_comma_header(headers, b"connection", sorted(connection))

        return Response(
            headers=headers,
            status_code=response.status_code,
            http_version=response.http_version,
            reason=response.reason,
        )
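

# ----------------------------------------------------------------
# Illustrative sketch (not part of h11): a minimal blocking server loop built
# on the public API above. `sock` is assumed to be an already-accepted,
# connected socket object supplied by the caller; error handling and
# keep-alive (start_next_cycle) are omitted for brevity.

def _example_serve_one_request(sock: Any) -> None:
    conn = Connection(SERVER)
    request = None
    # Read until the client's request (headers + any body) is complete.
    while True:
        event = conn.next_event()
        if event is NEED_DATA:
            conn.receive_data(sock.recv(65536))  # b"" signals EOF to h11
            continue
        if type(event) is ConnectionClosed or event is PAUSED:
            return
        if type(event) is Request:
            request = event
        if type(event) is EndOfMessage:
            break
    assert request is not None
    # Send back a trivial response. Content-Length is set explicitly, so
    # _clean_up_response_headers_for_sending leaves the framing alone.
    body = b"Hello from " + request.target
    sock.sendall(
        conn.send(
            Response(
                status_code=200,
                headers=[
                    (b"Content-Type", b"text/plain"),
                    (b"Content-Length", b"%d" % len(body)),
                ],
            )
        )
    )
    sock.sendall(conn.send(Data(data=body)))
    sock.sendall(conn.send(EndOfMessage()))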