Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/h11/_connection.py: 20%


208 statements  

# This contains the main Connection class. Everything in h11 revolves around
# this.
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type, Union

from ._events import (
    ConnectionClosed,
    Data,
    EndOfMessage,
    Event,
    InformationalResponse,
    Request,
    Response,
)
from ._headers import get_comma_header, has_expect_100_continue, set_comma_header
from ._readers import READERS, ReadersType
from ._receivebuffer import ReceiveBuffer
from ._state import (
    _SWITCH_CONNECT,
    _SWITCH_UPGRADE,
    CLIENT,
    ConnectionState,
    DONE,
    ERROR,
    MIGHT_SWITCH_PROTOCOL,
    SEND_BODY,
    SERVER,
    SWITCHED_PROTOCOL,
)
from ._util import (  # Import the internal things we need
    LocalProtocolError,
    RemoteProtocolError,
    Sentinel,
)
from ._writers import WRITERS, WritersType

# Everything in __all__ gets re-exported as part of the h11 public API.
__all__ = ["Connection", "NEED_DATA", "PAUSED"]


class NEED_DATA(Sentinel, metaclass=Sentinel):
    pass


class PAUSED(Sentinel, metaclass=Sentinel):
    pass


# If we ever have this much buffered without it making a complete parseable
# event, we error out. The only time we really buffer is when reading the
# request/response line + headers together, so this is effectively the limit on
# the size of that.
#
# Some precedents for defaults:
# - node.js: 80 * 1024
# - tomcat: 8 * 1024
# - IIS: 16 * 1024
# - Apache: <8 KiB per line>
DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024
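# This is only a default; a quick sketch of how it is meant to be overridden
# (the 64 KiB figure below is just an illustration, not a recommendation):
# callers who need to accept unusually large request lines + headers can pass
# their own limit when constructing the Connection class defined below, e.g.
#
#     conn = Connection(SERVER, max_incomplete_event_size=64 * 1024)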

# RFC 7230's rules for connection lifecycles:
# - If either side says they want to close the connection, then the connection
#   must close.
# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close
# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive
#   (and even this is a mess -- e.g. if you're implementing a proxy then
#   sending Connection: keep-alive is forbidden).
#
# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So
# our rule is:
# - If someone says Connection: close, we will close
# - If someone uses HTTP/1.0, we will close.
def _keep_alive(event: Union[Request, Response]) -> bool:
    connection = get_comma_header(event.headers, b"connection")
    if b"close" in connection:
        return False
    if getattr(event, "http_version", b"1.1") < b"1.1":
        return False
    return True
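# A quick illustration of the rule above (a sketch only; these events are
# constructed purely to show the decision and are not sent anywhere):
#
#     _keep_alive(Request(method="GET", target="/",
#                         headers=[("Host", "example.com")]))
#     # -> True: HTTP/1.1 and no Connection: close
#
#     _keep_alive(Request(method="GET", target="/",
#                         headers=[("Host", "example.com"),
#                                  ("Connection", "close")]))
#     # -> False: explicit Connection: close
#
#     _keep_alive(Request(method="GET", target="/", http_version="1.0",
#                         headers=[("Host", "example.com")]))
#     # -> False: HTTP/1.0 peer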


def _body_framing(
    request_method: bytes, event: Union[Request, Response]
) -> Tuple[str, Union[Tuple[()], Tuple[int]]]:
    # Called when we enter SEND_BODY to figure out framing information for
    # this body.
    #
    # These are the only two events that can trigger a SEND_BODY state:
    assert type(event) in (Request, Response)
    # Returns one of:
    #
    #     ("content-length", count)
    #     ("chunked", ())
    #     ("http/1.0", ())
    #
    # which are (lookup key, *args) for constructing body reader/writer
    # objects.
    #
    # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3
    #
    # Step 1: some responses always have an empty body, regardless of what the
    # headers say.
    if type(event) is Response:
        if (
            event.status_code in (204, 304)
            or request_method == b"HEAD"
            or (request_method == b"CONNECT" and 200 <= event.status_code < 300)
        ):
            return ("content-length", (0,))
        # Section 3.3.3 also lists another case -- responses with status_code
        # < 200. For us these are InformationalResponses, not Responses, so
        # they can't get into this function in the first place.
        assert event.status_code >= 200

    # Step 2: check for Transfer-Encoding (T-E beats C-L):
    transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
    if transfer_encodings:
        assert transfer_encodings == [b"chunked"]
        return ("chunked", ())

    # Step 3: check for Content-Length
    content_lengths = get_comma_header(event.headers, b"content-length")
    if content_lengths:
        return ("content-length", (int(content_lengths[0]),))

    # Step 4: no applicable headers; fallback/default depends on type
    if type(event) is Request:
        return ("content-length", (0,))
    else:
        return ("http/1.0", ())
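# For intuition, a few concrete inputs and the framing they produce (a sketch
# only; nothing here is executed by the module):
#
#     _body_framing(b"GET", Response(status_code=200,
#                                    headers=[("Content-Length", "100")]))
#     # -> ("content-length", (100,))
#
#     _body_framing(b"GET", Response(status_code=200,
#                                    headers=[("Transfer-Encoding", "chunked")]))
#     # -> ("chunked", ())
#
#     _body_framing(b"HEAD", Response(status_code=200, headers=[]))
#     # -> ("content-length", (0,)), since HEAD responses never carry a body
#
#     _body_framing(b"GET", Response(status_code=200, headers=[]))
#     # -> ("http/1.0", ()), i.e. read until the connection closes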


################################################################
#
# The main Connection class
#
################################################################


class Connection:
    """An object encapsulating the state of an HTTP connection.

    Args:
        our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
            you're implementing a server, pass :data:`h11.SERVER`.

        max_incomplete_event_size (int):
            The maximum number of bytes we're willing to buffer of an
            incomplete event. In practice this mostly sets a limit on the
            maximum size of the request/response line + headers. If this is
            exceeded, then :meth:`next_event` will raise
            :exc:`RemoteProtocolError`.

    """
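    # A bare-bones usage sketch (illustrative only, not exercised by this
    # module): a client turns events into bytes with send(), and turns bytes
    # received from the peer back into events with receive_data() plus
    # next_event().
    #
    #     conn = Connection(our_role=CLIENT)
    #     bytes_to_send = conn.send(Request(method="GET", target="/",
    #                                       headers=[("Host", "example.com")]))
    #     bytes_to_send += conn.send(EndOfMessage())
    #     # ... write bytes_to_send to the socket, read the reply, then:
    #     conn.receive_data(reply_bytes)  # reply_bytes: whatever was read
    #     event = conn.next_event()       # Response, Data, NEED_DATA, ...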

    def __init__(
        self,
        our_role: Type[Sentinel],
        max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
    ) -> None:
        self._max_incomplete_event_size = max_incomplete_event_size
        # State and role tracking
        if our_role not in (CLIENT, SERVER):
            raise ValueError("expected CLIENT or SERVER, not {!r}".format(our_role))
        self.our_role = our_role
        self.their_role: Type[Sentinel]
        if our_role is CLIENT:
            self.their_role = SERVER
        else:
            self.their_role = CLIENT
        self._cstate = ConnectionState()

        # Callables for converting data->events or vice-versa given the
        # current state
        self._writer = self._get_io_object(self.our_role, None, WRITERS)
        self._reader = self._get_io_object(self.their_role, None, READERS)

        # Holds any unprocessed received data
        self._receive_buffer = ReceiveBuffer()
        # If this is true, then it indicates that the incoming connection was
        # closed *after* the end of whatever's in self._receive_buffer:
        self._receive_buffer_closed = False

        # Extra bits of state that don't fit into the state machine.
        #
        # These two are only used to interpret framing headers for figuring
        # out how to read/write response bodies. their_http_version is also
        # made available as a convenient public API.
        self.their_http_version: Optional[bytes] = None
        self._request_method: Optional[bytes] = None
        # This is pure flow-control and doesn't at all affect the set of legal
        # transitions, so no need to bother ConnectionState with it:
        self.client_is_waiting_for_100_continue = False

    @property
    def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
        """A dictionary like::

           {CLIENT: <client state>, SERVER: <server state>}

        See :ref:`state-machine` for details.

        """
        return dict(self._cstate.states)

    @property
    def our_state(self) -> Type[Sentinel]:
        """The current state of whichever role we are playing. See
        :ref:`state-machine` for details.
        """
        return self._cstate.states[self.our_role]

    @property
    def their_state(self) -> Type[Sentinel]:
        """The current state of whichever role we are NOT playing. See
        :ref:`state-machine` for details.
        """
        return self._cstate.states[self.their_role]

    @property
    def they_are_waiting_for_100_continue(self) -> bool:
        return self.their_role is CLIENT and self.client_is_waiting_for_100_continue

    def start_next_cycle(self) -> None:
        """Attempt to reset our connection state for a new request/response
        cycle.

        If both client and server are in :data:`DONE` state, then resets them
        both to :data:`IDLE` state in preparation for a new request/response
        cycle on this same connection. Otherwise, raises a
        :exc:`LocalProtocolError`.

        See :ref:`keepalive-and-pipelining`.

        """
        old_states = dict(self._cstate.states)
        self._cstate.start_next_cycle()
        self._request_method = None
        # self.their_http_version gets left alone, since it presumably lasts
        # beyond a single request/response cycle
        assert not self.client_is_waiting_for_100_continue
        self._respond_to_state_changes(old_states)
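    # For example, a server reusing one connection across several keep-alive
    # requests might end each cycle with something like this (a sketch only;
    # error handling omitted):
    #
    #     if conn.our_state is DONE and conn.their_state is DONE:
    #         conn.start_next_cycle()  # back to IDLE, ready for the next request
    #     else:
    #         ...  # connection can't be reused; close the socket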

    def _process_error(self, role: Type[Sentinel]) -> None:
        old_states = dict(self._cstate.states)
        self._cstate.process_error(role)
        self._respond_to_state_changes(old_states)

    def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]:
        if type(event) is InformationalResponse and event.status_code == 101:
            return _SWITCH_UPGRADE
        if type(event) is Response:
            if (
                _SWITCH_CONNECT in self._cstate.pending_switch_proposals
                and 200 <= event.status_code < 300
            ):
                return _SWITCH_CONNECT
        return None

    # All events go through here
    def _process_event(self, role: Type[Sentinel], event: Event) -> None:
        # First, pass the event through the state machine to make sure it
        # succeeds.
        old_states = dict(self._cstate.states)
        if role is CLIENT and type(event) is Request:
            if event.method == b"CONNECT":
                self._cstate.process_client_switch_proposal(_SWITCH_CONNECT)
            if get_comma_header(event.headers, b"upgrade"):
                self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE)
        server_switch_event = None
        if role is SERVER:
            server_switch_event = self._server_switch_event(event)
        self._cstate.process_event(role, type(event), server_switch_event)

        # Then perform the updates triggered by it.

        if type(event) is Request:
            self._request_method = event.method

        if role is self.their_role and type(event) in (
            Request,
            Response,
            InformationalResponse,
        ):
            event = cast(Union[Request, Response, InformationalResponse], event)
            self.their_http_version = event.http_version

        # Keep alive handling
        #
        # RFC 7230 doesn't really say what one should do if Connection: close
        # shows up on a 1xx InformationalResponse. I think the idea is that
        # this is not supposed to happen. In any case, if it does happen, we
        # ignore it.
        if type(event) in (Request, Response) and not _keep_alive(
            cast(Union[Request, Response], event)
        ):
            self._cstate.process_keep_alive_disabled()

        # 100-continue
        if type(event) is Request and has_expect_100_continue(event):
            self.client_is_waiting_for_100_continue = True
        if type(event) in (InformationalResponse, Response):
            self.client_is_waiting_for_100_continue = False
        if role is CLIENT and type(event) in (Data, EndOfMessage):
            self.client_is_waiting_for_100_continue = False

        self._respond_to_state_changes(old_states, event)

    def _get_io_object(
        self,
        role: Type[Sentinel],
        event: Optional[Event],
        io_dict: Union[ReadersType, WritersType],
    ) -> Optional[Callable[..., Any]]:
        # event may be None; it's only used when entering SEND_BODY
        state = self._cstate.states[role]
        if state is SEND_BODY:
            # Special case: the io_dict has a dict of reader/writer factories
            # that depend on the request/response framing.
            framing_type, args = _body_framing(
                cast(bytes, self._request_method), cast(Union[Request, Response], event)
            )
            return io_dict[SEND_BODY][framing_type](*args)  # type: ignore[index]
        else:
            # General case: the io_dict just has the appropriate reader/writer
            # for this state
            return io_dict.get((role, state))  # type: ignore[return-value]

    # This must be called after any action that might have caused
    # self._cstate.states to change.
    def _respond_to_state_changes(
        self,
        old_states: Dict[Type[Sentinel], Type[Sentinel]],
        event: Optional[Event] = None,
    ) -> None:
        # Update reader/writer
        if self.our_state != old_states[self.our_role]:
            self._writer = self._get_io_object(self.our_role, event, WRITERS)
        if self.their_state != old_states[self.their_role]:
            self._reader = self._get_io_object(self.their_role, event, READERS)

    @property
    def trailing_data(self) -> Tuple[bytes, bool]:
        """Data that has been received, but not yet processed, represented as
        a tuple with two elements, where the first is a byte-string containing
        the unprocessed data itself, and the second is a bool that is True if
        the receive connection was closed.

        See :ref:`switching-protocols` for discussion of why you'd want this.
        """
        return (bytes(self._receive_buffer), self._receive_buffer_closed)

    def receive_data(self, data: bytes) -> None:
        """Add data to our internal receive buffer.

        This does not actually do any processing on the data, just stores
        it. To trigger processing, you have to call :meth:`next_event`.

        Args:
            data (:term:`bytes-like object`):
                The new data that was just received.

                Special case: If *data* is an empty byte-string like ``b""``,
                then this indicates that the remote side has closed the
                connection (end of file). Normally this is convenient, because
                standard Python APIs like :meth:`file.read` or
                :meth:`socket.recv` use ``b""`` to indicate end-of-file, while
                other failures to read are indicated using other mechanisms
                like raising :exc:`TimeoutError`. When using such an API you
                can just blindly pass through whatever you get from ``read``
                to :meth:`receive_data`, and everything will work.

                But, if you have an API where reading an empty string is a
                valid non-EOF condition, then you need to be aware of this and
                make sure to check for such strings and avoid passing them to
                :meth:`receive_data`.

        Returns:
            Nothing, but after calling this you should call :meth:`next_event`
            to parse the newly received data.

        Raises:
            RuntimeError:
                Raised if you pass an empty *data*, indicating EOF, and then
                pass a non-empty *data*, indicating more data that somehow
                arrived after the EOF.

                (Calling ``receive_data(b"")`` multiple times is fine,
                and equivalent to calling it once.)

        """
        if data:
            if self._receive_buffer_closed:
                raise RuntimeError("received close, then received more data?")
            self._receive_buffer += data
        else:
            self._receive_buffer_closed = True
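    # The EOF convention above means a blocking read loop can usually forward
    # whatever the socket gives it unchanged (a sketch; ``sock`` stands in for
    # a connected socket and is not part of this class):
    #
    #     data = sock.recv(8192)
    #     conn.receive_data(data)  # b"" from recv() correctly signals EOF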

    def _extract_next_receive_event(
        self,
    ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
        state = self.their_state
        # We don't pause immediately when they enter DONE, because even in
        # DONE state we can still process a ConnectionClosed() event. But
        # if we have data in our buffer, then we definitely aren't getting
        # a ConnectionClosed() immediately and we need to pause.
        if state is DONE and self._receive_buffer:
            return PAUSED
        if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL:
            return PAUSED
        assert self._reader is not None
        event = self._reader(self._receive_buffer)
        if event is None:
            if not self._receive_buffer and self._receive_buffer_closed:
                # In some unusual cases (basically just HTTP/1.0 bodies), EOF
                # triggers an actual protocol event; in that case, we want to
                # return that event, and then the state will change and we'll
                # get called again to generate the actual ConnectionClosed().
                if hasattr(self._reader, "read_eof"):
                    event = self._reader.read_eof()  # type: ignore[attr-defined]
                else:
                    event = ConnectionClosed()
        if event is None:
            event = NEED_DATA
        return event  # type: ignore[no-any-return]

    def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]:
        """Parse the next event out of our receive buffer, update our internal
        state, and return it.

        This is a mutating operation -- think of it like calling :func:`next`
        on an iterator.

        Returns:
            : One of three things:

            1) An event object -- see :ref:`events`.

            2) The special constant :data:`NEED_DATA`, which indicates that
               you need to read more data from your socket and pass it to
               :meth:`receive_data` before this method will be able to return
               any more events.

            3) The special constant :data:`PAUSED`, which indicates that we
               are not in a state where we can process incoming data (usually
               because the peer has finished their part of the current
               request/response cycle, and you have not yet called
               :meth:`start_next_cycle`). See :ref:`flow-control` for details.

        Raises:
            RemoteProtocolError:
                The peer has misbehaved. You should close the connection
                (possibly after sending some kind of 4xx response).

        Once this method returns :class:`ConnectionClosed`, all subsequent
        calls will also return :class:`ConnectionClosed`.

        If this method raises any exception besides :exc:`RemoteProtocolError`
        then that's a bug -- if it happens please file a bug report!

        If this method raises any exception then it also sets
        :attr:`Connection.their_state` to :data:`ERROR` -- see
        :ref:`error-handling` for discussion.

        """

        if self.their_state is ERROR:
            raise RemoteProtocolError("Can't receive data when peer state is ERROR")
        try:
            event = self._extract_next_receive_event()
            if event not in [NEED_DATA, PAUSED]:
                self._process_event(self.their_role, cast(Event, event))
            if event is NEED_DATA:
                if len(self._receive_buffer) > self._max_incomplete_event_size:
                    # 431 is "Request header fields too large" which is pretty
                    # much the only situation where we can get here
                    raise RemoteProtocolError(
                        "Receive buffer too long", error_status_hint=431
                    )
                if self._receive_buffer_closed:
                    # We're still trying to complete some event, but that's
                    # never going to happen because no more data is coming
                    raise RemoteProtocolError("peer unexpectedly closed connection")
            return event
        except BaseException as exc:
            self._process_error(self.their_role)
            if isinstance(exc, LocalProtocolError):
                exc._reraise_as_remote_protocol_error()
            else:
                raise
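    # The usual calling pattern is to keep pulling events until NEED_DATA,
    # then read more bytes from the network and go around again (a sketch;
    # ``read_from_peer`` stands in for whatever I/O layer is in use):
    #
    #     while True:
    #         event = conn.next_event()
    #         if event is NEED_DATA:
    #             conn.receive_data(read_from_peer())
    #             continue
    #         if event is PAUSED or type(event) is ConnectionClosed:
    #             break
    #         ...  # handle Request / Response / Data / EndOfMessage here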

    def send(self, event: Event) -> Optional[bytes]:
        """Convert a high-level event into bytes that can be sent to the peer,
        while updating our internal state machine.

        Args:
            event: The :ref:`event <events>` to send.

        Returns:
            If ``type(event) is ConnectionClosed``, then returns
            ``None``. Otherwise, returns a :term:`bytes-like object`.

        Raises:
            LocalProtocolError:
                Sending this event at this time would violate our
                understanding of the HTTP/1.1 protocol.

        If this method raises any exception then it also sets
        :attr:`Connection.our_state` to :data:`ERROR` -- see
        :ref:`error-handling` for discussion.

        """
        data_list = self.send_with_data_passthrough(event)
        if data_list is None:
            return None
        else:
            return b"".join(data_list)
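    # Sending a message with a body is just a sequence of send() calls, one
    # per event (a sketch from the server side, once a request has been
    # received and we are ready to respond; ``sock`` stands in for the
    # transport):
    #
    #     sock.sendall(conn.send(Response(status_code=200,
    #                                     headers=[("Content-Length", "5")])))
    #     sock.sendall(conn.send(Data(data=b"hello")))
    #     sock.sendall(conn.send(EndOfMessage()))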

    def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]:
        """Identical to :meth:`send`, except that in situations where
        :meth:`send` returns a single :term:`bytes-like object`, this instead
        returns a list of them -- and when sending a :class:`Data` event, this
        list is guaranteed to contain the exact object you passed in as
        :attr:`Data.data`. See :ref:`sendfile` for discussion.

        """
        if self.our_state is ERROR:
            raise LocalProtocolError("Can't send data when our state is ERROR")
        try:
            if type(event) is Response:
                event = self._clean_up_response_headers_for_sending(event)
            # We want to call _process_event before calling the writer,
            # because if someone tries to do something invalid then this will
            # give a sensible error message, while our writers all just assume
            # they will only receive valid events. But, _process_event might
            # change self._writer. So we have to do a little dance:
            writer = self._writer
            self._process_event(self.our_role, event)
            if type(event) is ConnectionClosed:
                return None
            else:
                # In any situation where writer is None, process_event should
                # have raised ProtocolError
                assert writer is not None
                data_list: List[bytes] = []
                writer(event, data_list.append)
                return data_list
        except:
            self._process_error(self.our_role)
            raise
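    # Because the Data payload is passed through untouched, callers that want
    # to avoid copying a large body can write each chunk separately instead of
    # joining them (a sketch; ``sock`` and ``big_buffer`` are placeholders):
    #
    #     for chunk in conn.send_with_data_passthrough(Data(data=big_buffer)):
    #         sock.sendall(chunk)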

    def send_failed(self) -> None:
        """Notify the state machine that we failed to send the data it gave
        us.

        This causes :attr:`Connection.our_state` to immediately become
        :data:`ERROR` -- see :ref:`error-handling` for discussion.

        """
        self._process_error(self.our_role)

    # When sending a Response, we take responsibility for a few things:
    #
    # - Sometimes you MUST set Connection: close. We take care of those
    #   times. (You can also set it yourself if you want, and if you do then
    #   we'll respect that and close the connection at the right time. But you
    #   don't have to worry about that unless you want to.)
    #
    # - The user has to set Content-Length if they want it. Otherwise, for
    #   responses that have bodies (e.g. not HEAD), we will automatically
    #   select the right mechanism for streaming a body of unknown length,
    #   which depends on the peer's HTTP version.
    #
    # This function's *only* responsibility is making sure headers are set up
    # right -- everything downstream just looks at the headers. There are no
    # side channels.
    def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
        assert type(response) is Response

        headers = response.headers
        need_close = False

        # HEAD requests need some special handling: they always act like they
        # have Content-Length: 0, and that's how _body_framing treats
        # them. But their headers are supposed to match what we would send if
        # the request was a GET. (Technically there is one deviation allowed:
        # we're allowed to leave out the framing headers -- see
        # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
        # easy to get them right.)
        method_for_choosing_headers = cast(bytes, self._request_method)
        if method_for_choosing_headers == b"HEAD":
            method_for_choosing_headers = b"GET"
        framing_type, _ = _body_framing(method_for_choosing_headers, response)
        if framing_type in ("chunked", "http/1.0"):
            # This response has a body of unknown length.
            # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
            # If our peer is HTTP/1.0, we use no framing headers, and close the
            # connection afterwards.
            #
            # Make sure to clear Content-Length (in principle user could have
            # set both and then we ignored Content-Length b/c
            # Transfer-Encoding overwrote it -- this would be naughty of them,
            # but the HTTP spec says that if our peer does this then we have
            # to fix it instead of erroring out, so we'll accord the user the
            # same respect).
            headers = set_comma_header(headers, b"content-length", [])
            if self.their_http_version is None or self.their_http_version < b"1.1":
                # Either we never got a valid request and are sending back an
                # error (their_http_version is None), so we assume the worst;
                # or else we did get a valid HTTP/1.0 request, so we know that
                # they don't understand chunked encoding.
                headers = set_comma_header(headers, b"transfer-encoding", [])
                # This is actually redundant ATM, since currently we
                # unconditionally disable keep-alive when talking to HTTP/1.0
                # peers. But let's be defensive just in case we add
                # Connection: keep-alive support later:
                if self._request_method != b"HEAD":
                    need_close = True
            else:
                headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])

        if not self._cstate.keep_alive or need_close:
            # Make sure Connection: close is set
            connection = set(get_comma_header(headers, b"connection"))
            connection.discard(b"keep-alive")
            connection.add(b"close")
            headers = set_comma_header(headers, b"connection", sorted(connection))

        return Response(
            headers=headers,
            status_code=response.status_code,
            http_version=response.http_version,
            reason=response.reason,
        )