Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tornado/httpserver.py: 34%

126 statements  

« prev     ^ index     » next       coverage.py v7.2.7, created at 2023-07-01 06:54 +0000

1# 

2# Copyright 2009 Facebook 

3# 

4# Licensed under the Apache License, Version 2.0 (the "License"); you may 

5# not use this file except in compliance with the License. You may obtain 

6# a copy of the License at 

7# 

8# http://www.apache.org/licenses/LICENSE-2.0 

9# 

10# Unless required by applicable law or agreed to in writing, software 

11# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

12# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

13# License for the specific language governing permissions and limitations 

14# under the License. 

15 

16"""A non-blocking, single-threaded HTTP server. 

17 

18Typical applications have little direct interaction with the `HTTPServer` 

19class except to start a server at the beginning of the process 

20(and even that is often done indirectly via `tornado.web.Application.listen`). 

21 

22.. versionchanged:: 4.0 

23 

24 The ``HTTPRequest`` class that used to live in this module has been moved 

25 to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. 

26""" 

27 

28import socket 

29import ssl 

30 

31from tornado.escape import native_str 

32from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters 

33from tornado import httputil 

34from tornado import iostream 

35from tornado import netutil 

36from tornado.tcpserver import TCPServer 

37from tornado.util import Configurable 

38 

39import typing 

40from typing import Union, Any, Dict, Callable, List, Type, Tuple, Optional, Awaitable 

41 

42if typing.TYPE_CHECKING: 

43 from typing import Set # noqa: F401 

44 

45 

class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
    or, for backwards compatibility, a callback that takes an
    `.HTTPServerRequest` as an argument. The delegate is usually a
    `tornado.web.Application`.

    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``).

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and
    ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
    remote IP and URI scheme/protocol for all requests. These headers
    are useful when running Tornado behind a reverse proxy or load
    balancer. The ``protocol`` argument can also be set to ``https``
    if Tornado is run behind an SSL-decoding proxy that does not set one of
    the supported ``xheaders``.

    By default, when parsing the ``X-Forwarded-For`` header, Tornado will
    select the last (i.e., the closest) address on the list of hosts as the
    remote host IP address. To select the next server in the chain, a list of
    trusted downstream hosts may be passed as the ``trusted_downstream``
    argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
    header.

    To make this server serve SSL traffic, send the ``ssl_options`` keyword
    argument with an `ssl.SSLContext` object. For compatibility with older
    versions of Python ``ssl_options`` may also be a dictionary of keyword
    arguments for the `ssl.wrap_socket` method.::

       ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
       ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
                               os.path.join(data_dir, "mydomain.key"))
       HTTPServer(application, ssl_options=ssl_ctx)

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.tcpserver.TCPServer`):

    1. `~tornado.tcpserver.TCPServer.listen`: single-process::

        async def main():
            server = HTTPServer()
            server.listen(8888)
            await asyncio.Event().wait()

        asyncio.run(main())

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

       While this example does not create multiple processes on its own, when
       the ``reuse_port=True`` argument is passed to ``listen()`` you can run
       the program multiple times to create a multi-process service.

    2. `~tornado.tcpserver.TCPServer.add_sockets`: multi-process::

        sockets = bind_sockets(8888)
        tornado.process.fork_processes(0)
        async def post_fork_main():
            server = HTTPServer()
            server.add_sockets(sockets)
            await asyncio.Event().wait()
        asyncio.run(post_fork_main())

       The ``add_sockets`` interface is more complicated, but it can be used
       with `tornado.process.fork_processes` to run a multi-process service
       with all worker processes forked from a single parent. ``add_sockets``
       can also be used in single-process servers if you want to create your
       listening sockets in some way other than
       `~tornado.netutil.bind_sockets`.

       Note that when using this pattern, nothing that touches the event loop
       can be run before ``fork_processes``.

    3. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
       simple **deprecated** multi-process::

        server = HTTPServer()
        server.bind(8888)
        server.start(0)  # Forks multiple sub-processes
        IOLoop.current().start()

       This pattern is deprecated because it requires interfaces in the
       `asyncio` module that have been deprecated since Python 3.10. Support
       for creating multiple processes in the ``start`` method will be removed
       in a future version of Tornado.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments.  Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.

    .. versionchanged:: 4.1
       `.HTTPServerConnectionDelegate.start_request` is now called with
       two arguments ``(server_conn, request_conn)`` (in accordance with the
       documentation) instead of one ``(request_conn)``.

    .. versionchanged:: 4.2
       `HTTPServer` is now a subclass of `tornado.util.Configurable`.

    .. versionchanged:: 4.5
       Added the ``trusted_downstream`` argument.

    .. versionchanged:: 5.0
       The ``io_loop`` argument has been removed.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Ignore args to __init__; real initialization belongs in
        # initialize since we're Configurable. (there's something
        # weird in initialization order between this class,
        # Configurable, and TCPServer so we can't leave __init__ out
        # completely)
        pass

    def initialize(
        self,
        request_callback: Union[
            httputil.HTTPServerConnectionDelegate,
            Callable[[httputil.HTTPServerRequest], None],
        ],
        no_keep_alive: bool = False,
        xheaders: bool = False,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        protocol: Optional[str] = None,
        decompress_request: bool = False,
        chunk_size: Optional[int] = None,
        max_header_size: Optional[int] = None,
        idle_connection_timeout: Optional[float] = None,
        body_timeout: Optional[float] = None,
        max_body_size: Optional[int] = None,
        max_buffer_size: Optional[int] = None,
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        # This method's signature is not extracted with autodoc
        # because we want its arguments to appear on the class
        # constructor. When changing this signature, also update the
        # copy in httpserver.rst.
        self.request_callback = request_callback
        self.xheaders = xheaders
        self.protocol = protocol
        # Per-connection HTTP/1.x parameters shared by every connection
        # this server accepts.
        self.conn_params = HTTP1ConnectionParameters(
            decompress=decompress_request,
            chunk_size=chunk_size,
            max_header_size=max_header_size,
            # Fall back to a one-hour header timeout when no
            # idle_connection_timeout was given.
            header_timeout=idle_connection_timeout or 3600,
            max_body_size=max_body_size,
            body_timeout=body_timeout,
            no_keep_alive=no_keep_alive,
        )
        TCPServer.__init__(
            self,
            ssl_options=ssl_options,
            max_buffer_size=max_buffer_size,
            read_chunk_size=chunk_size,
        )
        # Currently-open connections, tracked so close_all_connections
        # can wait for them to finish.
        self._connections = set()  # type: Set[HTTP1ServerConnection]
        self.trusted_downstream = trusted_downstream

    @classmethod
    def configurable_base(cls) -> Type[Configurable]:
        # HTTPServer is both the base and the default of its own
        # Configurable hierarchy.
        return HTTPServer

    @classmethod
    def configurable_default(cls) -> Type[Configurable]:
        return HTTPServer

    async def close_all_connections(self) -> None:
        """Close all open connections and asynchronously wait for them to finish.

        This method is used in combination with `~.TCPServer.stop` to
        support clean shutdowns (especially for unittests). Typical
        usage would call ``stop()`` first to stop accepting new
        connections, then ``await close_all_connections()`` to wait for
        existing connections to finish.

        This method does not currently close open websocket connections.

        Note that this method is a coroutine and must be called with ``await``.

        """
        while self._connections:
            # Peek at an arbitrary element of the set; on_close removes
            # each connection from the set as it finishes.
            conn = next(iter(self._connections))
            await conn.close()

    def handle_stream(self, stream: iostream.IOStream, address: Tuple) -> None:
        """Handle a new connection (called by `.TCPServer` for each accept).

        Wraps the stream in an `.HTTP1ServerConnection` and starts
        serving requests on it, with this server as the connection
        delegate.
        """
        context = _HTTPRequestContext(
            stream, address, self.protocol, self.trusted_downstream
        )
        conn = HTTP1ServerConnection(stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)

    def start_request(
        self, server_conn: object, request_conn: httputil.HTTPConnection
    ) -> httputil.HTTPMessageDelegate:
        """Return the delegate that will handle one request.

        Implements `.HTTPServerConnectionDelegate.start_request`.
        """
        if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
            delegate = self.request_callback.start_request(server_conn, request_conn)
        else:
            # Legacy interface: a plain callable taking an HTTPServerRequest.
            delegate = _CallableAdapter(self.request_callback, request_conn)

        if self.xheaders:
            # Rewrite remote_ip/protocol from X-Forwarded-* style headers
            # for the duration of each request on this connection.
            delegate = _ProxyAdapter(delegate, request_conn)

        return delegate

    def on_close(self, server_conn: object) -> None:
        """Forget a connection once it is closed.

        Implements `.HTTPServerConnectionDelegate.on_close`.
        """
        self._connections.remove(typing.cast(HTTP1ServerConnection, server_conn))

258 

259 

class _CallableAdapter(httputil.HTTPMessageDelegate):
    """Adapts a plain ``callback(request)`` to the `.HTTPMessageDelegate` API.

    Body chunks are buffered as they arrive; once the request is
    complete the full body is assembled and the callback is invoked
    with the finished `.HTTPServerRequest`.
    """

    def __init__(
        self,
        request_callback: Callable[[httputil.HTTPServerRequest], None],
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.request_callback = request_callback
        self.connection = request_conn
        self.delegate = None
        # Populated once headers have been received.
        self.request = None  # type: Optional[httputil.HTTPServerRequest]
        # Body chunks accumulated until finish().
        self._chunks = []  # type: List[bytes]

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        # A server-side adapter only ever sees request start lines.
        request_start_line = typing.cast(httputil.RequestStartLine, start_line)
        self.request = httputil.HTTPServerRequest(
            connection=self.connection,
            start_line=request_start_line,
            headers=headers,
        )
        return None

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # No flow control needed; just buffer the chunk.
        self._chunks.append(chunk)
        return None

    def finish(self) -> None:
        request = self.request
        assert request is not None
        request.body = b"".join(self._chunks)
        request._parse_body()
        self.request_callback(request)

    def on_connection_close(self) -> None:
        # Drop any buffered body data for the aborted request.
        del self._chunks

296 

297 

class _HTTPRequestContext(object):
    """Per-connection context (remote IP, protocol) attached to requests."""

    def __init__(
        self,
        stream: iostream.IOStream,
        address: Tuple,
        protocol: Optional[str],
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        self.address = address
        # Capture the socket's address family now, so self.address can
        # still be interpreted after the stream is closed and its socket
        # attribute becomes None.
        sock = stream.socket
        self.address_family = sock.family if sock is not None else None
        # HTTPServerRequest wants a bare IP, not a full socket address.
        if address is not None and self.address_family in (
            socket.AF_INET,
            socket.AF_INET6,
        ):
            self.remote_ip = address[0]
        else:
            # Unix (or other) socket; fake the remote address.
            self.remote_ip = "0.0.0.0"
        if protocol:
            self.protocol = protocol
        else:
            self.protocol = (
                "https" if isinstance(stream, iostream.SSLIOStream) else "http"
            )
        # Remember the originals so per-request xheader rewrites can be
        # undone between requests on the same connection.
        self._orig_remote_ip = self.remote_ip
        self._orig_protocol = self.protocol
        self.trusted_downstream = set(trusted_downstream or [])

    def __str__(self) -> str:
        if self.address_family in (socket.AF_INET, socket.AF_INET6):
            return self.remote_ip
        if isinstance(self.address, bytes):
            # Unix socket addresses are str on mac but bytes on linux;
            # str(bytes) warns under python -bb, so convert explicitly.
            return native_str(self.address)
        return str(self.address)

    def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:
        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
        # Squid uses X-Forwarded-For, others use X-Real-Ip
        ip = headers.get("X-Forwarded-For", self.remote_ip)
        # Walk the X-Forwarded-For chain from closest to furthest,
        # skipping hosts configured as trusted downstream proxies.
        for candidate in reversed(ip.split(",")):
            ip = candidate.strip()
            if ip not in self.trusted_downstream:
                break
        ip = headers.get("X-Real-Ip", ip)
        if netutil.is_valid_ip(ip):
            self.remote_ip = ip
        # AWS uses X-Forwarded-Proto
        scheme = headers.get(
            "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
        )
        if scheme:
            # use only the last proto entry if there is more than one
            # TODO: support trusting multiple layers of proxied protocol
            scheme = scheme.split(",")[-1].strip()
        if scheme in ("http", "https"):
            self.protocol = scheme

    def _unapply_xheaders(self) -> None:
        """Undo changes from `_apply_xheaders`.

        Xheaders are per-request so they should not leak to the next
        request on the same connection.
        """
        self.remote_ip = self._orig_remote_ip
        self.protocol = self._orig_protocol

374 

375 

class _ProxyAdapter(httputil.HTTPMessageDelegate):
    """Wraps a delegate to apply xheader rewrites around each request.

    The connection's context gets ``X-Forwarded-*`` style overrides when
    headers arrive and has them removed again once the request finishes
    (or the connection closes), so they cannot leak into the next
    request on the same connection.
    """

    def __init__(
        self,
        delegate: httputil.HTTPMessageDelegate,
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.delegate = delegate
        self.connection = request_conn

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        # TODO: either make context an official part of the
        # HTTPConnection interface or figure out some other way to do this.
        self.connection.context._apply_xheaders(headers)  # type: ignore
        return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        # Pass body data straight through.
        return self.delegate.data_received(chunk)

    def finish(self) -> None:
        self.delegate.finish()
        self._cleanup()

    def on_connection_close(self) -> None:
        self.delegate.on_connection_close()
        self._cleanup()

    def _cleanup(self) -> None:
        # Restore the context's original remote_ip/protocol.
        self.connection.context._unapply_xheaders()  # type: ignore

408 

409 

# Backwards-compatible alias: the ``HTTPRequest`` class that used to live
# in this module was moved to `tornado.httputil.HTTPServerRequest` in
# Tornado 4.0 (see the module docstring's versionchanged note).
HTTPRequest = httputil.HTTPServerRequest