Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tornado/httpserver.py: 34%
126 statements
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""A non-blocking, single-threaded HTTP server.

Typical applications have little direct interaction with the `HTTPServer`
class except to start a server at the beginning of the process
(and even that is often done indirectly via `tornado.web.Application.listen`).
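
For illustration, a minimal application started that way (the handler, route,
and port are placeholders, not part of this module)::

    import asyncio
    import tornado.web

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello, world")

    async def main():
        app = tornado.web.Application([(r"/", MainHandler)])
        app.listen(8888)  # creates and starts an HTTPServer internally
        await asyncio.Event().wait()

    asyncio.run(main())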

.. versionchanged:: 4.0

   The ``HTTPRequest`` class that used to live in this module has been moved
   to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias.
"""

import socket
import ssl

from tornado.escape import native_str
from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters
from tornado import httputil
from tornado import iostream
from tornado import netutil
from tornado.tcpserver import TCPServer
from tornado.util import Configurable

import typing
from typing import Union, Any, Dict, Callable, List, Type, Tuple, Optional, Awaitable

if typing.TYPE_CHECKING:
    from typing import Set  # noqa: F401


class HTTPServer(TCPServer, Configurable, httputil.HTTPServerConnectionDelegate):
    r"""A non-blocking, single-threaded HTTP server.

    A server is defined by a subclass of `.HTTPServerConnectionDelegate`,
    or, for backwards compatibility, a callback that takes an
    `.HTTPServerRequest` as an argument. The delegate is usually a
    `tornado.web.Application`.

    `HTTPServer` supports keep-alive connections by default
    (automatically for HTTP/1.1, or for HTTP/1.0 when the client
    requests ``Connection: keep-alive``).

    If ``xheaders`` is ``True``, we support the
    ``X-Real-Ip``/``X-Forwarded-For`` and
    ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the
    remote IP and URI scheme/protocol for all requests. These headers
    are useful when running Tornado behind a reverse proxy or load
    balancer. The ``protocol`` argument can also be set to ``https``
    if Tornado is run behind an SSL-decoding proxy that does not set one of
    the supported ``xheaders``.

    By default, when parsing the ``X-Forwarded-For`` header, Tornado will
    select the last (i.e., the closest) address on the list of hosts as the
    remote host IP address. To select the next server in the chain, a list of
    trusted downstream hosts may be passed as the ``trusted_downstream``
    argument. These hosts will be skipped when parsing the ``X-Forwarded-For``
    header.
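
    For illustration, a minimal sketch of running behind a single trusted
    proxy (``application``, the proxy address ``10.0.0.1``, and the port are
    placeholders)::

       server = HTTPServer(application, xheaders=True,
                           trusted_downstream=["10.0.0.1"])
       server.listen(8080)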

    To make this server serve SSL traffic, send the ``ssl_options`` keyword
    argument with an `ssl.SSLContext` object. For compatibility with older
    versions of Python ``ssl_options`` may also be a dictionary of keyword
    arguments for the `ssl.wrap_socket` method::

       ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
       ssl_ctx.load_cert_chain(os.path.join(data_dir, "mydomain.crt"),
                               os.path.join(data_dir, "mydomain.key"))
       HTTPServer(application, ssl_options=ssl_ctx)

    `HTTPServer` initialization follows one of three patterns (the
    initialization methods are defined on `tornado.tcpserver.TCPServer`):

    1. `~tornado.tcpserver.TCPServer.listen`: single-process::

        async def main():
            server = HTTPServer()
            server.listen(8888)
            await asyncio.Event().wait()

        asyncio.run(main())

       In many cases, `tornado.web.Application.listen` can be used to avoid
       the need to explicitly create the `HTTPServer`.

       While this example does not create multiple processes on its own, when
       the ``reuse_port=True`` argument is passed to ``listen()`` you can run
       the program multiple times to create a multi-process service.
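
       A minimal sketch of that variant (the port is only an example; each
       run of the program binds the same port)::

        async def main():
            server = HTTPServer()
            server.listen(8888, reuse_port=True)
            await asyncio.Event().wait()

        asyncio.run(main())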

    2. `~tornado.tcpserver.TCPServer.add_sockets`: multi-process::

        sockets = bind_sockets(8888)
        tornado.process.fork_processes(0)
        async def post_fork_main():
            server = HTTPServer()
            server.add_sockets(sockets)
            await asyncio.Event().wait()
        asyncio.run(post_fork_main())

       The ``add_sockets`` interface is more complicated, but it can be used
       with `tornado.process.fork_processes` to run a multi-process service
       with all worker processes forked from a single parent. ``add_sockets``
       can also be used in single-process servers if you want to create your
       listening sockets in some way other than
       `~tornado.netutil.bind_sockets`.

       Note that when using this pattern, nothing that touches the event loop
       can be run before ``fork_processes``.

    3. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`:
       simple **deprecated** multi-process::

        server = HTTPServer()
        server.bind(8888)
        server.start(0)  # Forks multiple sub-processes
        IOLoop.current().start()

       This pattern is deprecated because it requires interfaces in the
       `asyncio` module that have been deprecated since Python 3.10. Support
       for creating multiple processes in the ``start`` method will be removed
       in a future version of Tornado.

    .. versionchanged:: 4.0
       Added ``decompress_request``, ``chunk_size``, ``max_header_size``,
       ``idle_connection_timeout``, ``body_timeout``, ``max_body_size``
       arguments. Added support for `.HTTPServerConnectionDelegate`
       instances as ``request_callback``.

    .. versionchanged:: 4.1
       `.HTTPServerConnectionDelegate.start_request` is now called with
       two arguments ``(server_conn, request_conn)`` (in accordance with the
       documentation) instead of one ``(request_conn)``.

    .. versionchanged:: 4.2
       `HTTPServer` is now a subclass of `tornado.util.Configurable`.

    .. versionchanged:: 4.5
       Added the ``trusted_downstream`` argument.

    .. versionchanged:: 5.0
       The ``io_loop`` argument has been removed.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Ignore args to __init__; real initialization belongs in
        # initialize since we're Configurable. (there's something
        # weird in initialization order between this class,
        # Configurable, and TCPServer so we can't leave __init__ out
        # completely)
        pass

    def initialize(
        self,
        request_callback: Union[
            httputil.HTTPServerConnectionDelegate,
            Callable[[httputil.HTTPServerRequest], None],
        ],
        no_keep_alive: bool = False,
        xheaders: bool = False,
        ssl_options: Optional[Union[Dict[str, Any], ssl.SSLContext]] = None,
        protocol: Optional[str] = None,
        decompress_request: bool = False,
        chunk_size: Optional[int] = None,
        max_header_size: Optional[int] = None,
        idle_connection_timeout: Optional[float] = None,
        body_timeout: Optional[float] = None,
        max_body_size: Optional[int] = None,
        max_buffer_size: Optional[int] = None,
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        # This method's signature is not extracted with autodoc
        # because we want its arguments to appear on the class
        # constructor. When changing this signature, also update the
        # copy in httpserver.rst.
        self.request_callback = request_callback
        self.xheaders = xheaders
        self.protocol = protocol
        self.conn_params = HTTP1ConnectionParameters(
            decompress=decompress_request,
            chunk_size=chunk_size,
            max_header_size=max_header_size,
            header_timeout=idle_connection_timeout or 3600,
            max_body_size=max_body_size,
            body_timeout=body_timeout,
            no_keep_alive=no_keep_alive,
        )
        TCPServer.__init__(
            self,
            ssl_options=ssl_options,
            max_buffer_size=max_buffer_size,
            read_chunk_size=chunk_size,
        )
        self._connections = set()  # type: Set[HTTP1ServerConnection]
        self.trusted_downstream = trusted_downstream

    @classmethod
    def configurable_base(cls) -> Type[Configurable]:
        return HTTPServer

    @classmethod
    def configurable_default(cls) -> Type[Configurable]:
        return HTTPServer

    async def close_all_connections(self) -> None:
        """Close all open connections and asynchronously wait for them to finish.

        This method is used in combination with `~.TCPServer.stop` to
        support clean shutdowns (especially for unittests). Typical
        usage would call ``stop()`` first to stop accepting new
        connections, then ``await close_all_connections()`` to wait for
        existing connections to finish.

        This method does not currently close open websocket connections.

        Note that this method is a coroutine and must be called with ``await``.
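
        A minimal shutdown sketch for that sequence (``server`` is an
        already-listening `HTTPServer`; the names are illustrative only)::

            server.stop()                         # stop accepting new connections
            await server.close_all_connections()  # let in-flight requests finish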
        """
        while self._connections:
            # Peek at an arbitrary element of the set
            conn = next(iter(self._connections))
            await conn.close()

    def handle_stream(self, stream: iostream.IOStream, address: Tuple) -> None:
        context = _HTTPRequestContext(
            stream, address, self.protocol, self.trusted_downstream
        )
        conn = HTTP1ServerConnection(stream, self.conn_params, context)
        self._connections.add(conn)
        conn.start_serving(self)

    def start_request(
        self, server_conn: object, request_conn: httputil.HTTPConnection
    ) -> httputil.HTTPMessageDelegate:
        if isinstance(self.request_callback, httputil.HTTPServerConnectionDelegate):
            delegate = self.request_callback.start_request(server_conn, request_conn)
        else:
            delegate = _CallableAdapter(self.request_callback, request_conn)

        if self.xheaders:
            delegate = _ProxyAdapter(delegate, request_conn)

        return delegate

    def on_close(self, server_conn: object) -> None:
        self._connections.remove(typing.cast(HTTP1ServerConnection, server_conn))
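

# _CallableAdapter below is what makes the plain-callback form of HTTPServer
# work: it implements the HTTPMessageDelegate interface on behalf of a
# function, buffers body chunks, and invokes the callback once the request is
# complete. A minimal sketch of that form (handler name and port are
# illustrative only; a `tornado.web.Application` is the more common delegate):
#
#     def handle_request(request: httputil.HTTPServerRequest) -> None:
#         message = ("You requested %s\n" % request.uri).encode("utf-8")
#         request.connection.write_headers(
#             httputil.ResponseStartLine("HTTP/1.1", 200, "OK"),
#             httputil.HTTPHeaders({"Content-Length": str(len(message))}),
#         )
#         request.connection.write(message)
#         request.connection.finish()
#
#     server = HTTPServer(handle_request)
#     server.listen(8888)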


class _CallableAdapter(httputil.HTTPMessageDelegate):
    def __init__(
        self,
        request_callback: Callable[[httputil.HTTPServerRequest], None],
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.connection = request_conn
        self.request_callback = request_callback
        self.request = None  # type: Optional[httputil.HTTPServerRequest]
        self.delegate = None
        self._chunks = []  # type: List[bytes]

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        self.request = httputil.HTTPServerRequest(
            connection=self.connection,
            start_line=typing.cast(httputil.RequestStartLine, start_line),
            headers=headers,
        )
        return None

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        self._chunks.append(chunk)
        return None

    def finish(self) -> None:
        assert self.request is not None
        self.request.body = b"".join(self._chunks)
        self.request._parse_body()
        self.request_callback(self.request)

    def on_connection_close(self) -> None:
        del self._chunks
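

# _HTTPRequestContext holds per-connection metadata (remote IP, scheme,
# address family) that is attached to each request served on that connection;
# its _apply_xheaders/_unapply_xheaders methods implement the ``xheaders``
# overrides described in the HTTPServer docstring.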


class _HTTPRequestContext(object):
    def __init__(
        self,
        stream: iostream.IOStream,
        address: Tuple,
        protocol: Optional[str],
        trusted_downstream: Optional[List[str]] = None,
    ) -> None:
        self.address = address
        # Save the socket's address family now so we know how to
        # interpret self.address even after the stream is closed
        # and its socket attribute replaced with None.
        if stream.socket is not None:
            self.address_family = stream.socket.family
        else:
            self.address_family = None
        # In HTTPServerRequest we want an IP, not a full socket address.
        if (
            self.address_family in (socket.AF_INET, socket.AF_INET6)
            and address is not None
        ):
            self.remote_ip = address[0]
        else:
            # Unix (or other) socket; fake the remote address.
            self.remote_ip = "0.0.0.0"
        if protocol:
            self.protocol = protocol
        elif isinstance(stream, iostream.SSLIOStream):
            self.protocol = "https"
        else:
            self.protocol = "http"
        self._orig_remote_ip = self.remote_ip
        self._orig_protocol = self.protocol
        self.trusted_downstream = set(trusted_downstream or [])

    def __str__(self) -> str:
        if self.address_family in (socket.AF_INET, socket.AF_INET6):
            return self.remote_ip
        elif isinstance(self.address, bytes):
            # Python 3 with the -bb option warns about str(bytes),
            # so convert it explicitly.
            # Unix socket addresses are str on mac but bytes on linux.
            return native_str(self.address)
        else:
            return str(self.address)

    def _apply_xheaders(self, headers: httputil.HTTPHeaders) -> None:
        """Rewrite the ``remote_ip`` and ``protocol`` fields."""
        # Squid uses X-Forwarded-For, others use X-Real-Ip
        ip = headers.get("X-Forwarded-For", self.remote_ip)
        # Skip trusted downstream hosts in X-Forwarded-For list
        for ip in (cand.strip() for cand in reversed(ip.split(","))):
            if ip not in self.trusted_downstream:
                break
        ip = headers.get("X-Real-Ip", ip)
        if netutil.is_valid_ip(ip):
            self.remote_ip = ip
        # AWS uses X-Forwarded-Proto
        proto_header = headers.get(
            "X-Scheme", headers.get("X-Forwarded-Proto", self.protocol)
        )
        if proto_header:
            # use only the last proto entry if there is more than one
            # TODO: support trusting multiple layers of proxied protocol
            proto_header = proto_header.split(",")[-1].strip()
        if proto_header in ("http", "https"):
            self.protocol = proto_header

    def _unapply_xheaders(self) -> None:
        """Undo changes from `_apply_xheaders`.

        Xheaders are per-request so they should not leak to the next
        request on the same connection.
        """
        self.remote_ip = self._orig_remote_ip
        self.protocol = self._orig_protocol
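

# _ProxyAdapter wraps the application's HTTPMessageDelegate when ``xheaders``
# is enabled: it calls _apply_xheaders as soon as the request headers arrive
# and _unapply_xheaders when the request finishes or the connection closes,
# so header overrides do not leak into the next request on the connection.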


class _ProxyAdapter(httputil.HTTPMessageDelegate):
    def __init__(
        self,
        delegate: httputil.HTTPMessageDelegate,
        request_conn: httputil.HTTPConnection,
    ) -> None:
        self.connection = request_conn
        self.delegate = delegate

    def headers_received(
        self,
        start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
        headers: httputil.HTTPHeaders,
    ) -> Optional[Awaitable[None]]:
        # TODO: either make context an official part of the
        # HTTPConnection interface or figure out some other way to do this.
        self.connection.context._apply_xheaders(headers)  # type: ignore
        return self.delegate.headers_received(start_line, headers)

    def data_received(self, chunk: bytes) -> Optional[Awaitable[None]]:
        return self.delegate.data_received(chunk)

    def finish(self) -> None:
        self.delegate.finish()
        self._cleanup()

    def on_connection_close(self) -> None:
        self.delegate.on_connection_close()
        self._cleanup()

    def _cleanup(self) -> None:
        self.connection.context._unapply_xheaders()  # type: ignore
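

# Backwards-compatibility alias: the ``HTTPRequest`` class that lived in this
# module before Tornado 4.0 is now `tornado.httputil.HTTPServerRequest`.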
HTTPRequest = httputil.HTTPServerRequest