1. Tornado is single-threaded and WSGI applications are synchronous. If we use Tornado to serve a WSGI application, it can in theory handle only one request at a time, and if any request blocks, Tornado's entire IOLoop is blocked with it. As shown below, we issue two GET requests to http://127.0.0.1:5000/ simultaneously.
The first request returns after about 5 seconds and the other after about 10 seconds, so the two requests are clearly executed sequentially (a small client sketch for reproducing this follows the server code below).
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from flask import Flask
import time

app = Flask(__name__)

@app.route('/')
def index():
    time.sleep(5)
    return 'OK'

if __name__ == '__main__':
    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5000)
    IOLoop.instance().start()
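To reproduce the measurement, something like the following client can be used. This is a minimal sketch of mine, not code from the original post; it only assumes the server above is running on 127.0.0.1:5000.

import threading
import time
import urllib.request

def fetch(name):
    # Time a single GET request against the Flask-on-Tornado server above.
    start = time.time()
    urllib.request.urlopen("http://127.0.0.1:5000/").read()
    print("%s finished after %.1f s" % (name, time.time() - start))

threads = [threading.Thread(target=fetch, args=("request-%d" % i,)) for i in (1, 2)]
for t in threads:
    t.start()
for t in threads:
    t.join()

With the plain WSGIContainer, the two prints show roughly 5 s and 10 s.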
2. As we know, the only way for Tornado to run a synchronous function asynchronously is to push it onto a thread, as shown below:
import time
import tornado
import tornado.gen
import tornado.ioloop
import tornado.web

class IndexHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        """Handle the HTTP GET request."""
        loop = tornado.ioloop.IOLoop.instance()
        yield loop.run_in_executor(None, self.sleep)
        self.write("Hello You!")

    def sleep(self):
        time.sleep(5)
        self.write('sleep OK')

if __name__ == "__main__":
    app = tornado.web.Application([
        (r"/", IndexHandler),
    ])
    app.listen(8000)
    tornado.ioloop.IOLoop.current().start()
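For reference, the same thread-offloading idea can also be written with a native coroutine instead of the yield style above (see also the note at the end). This is a sketch of mine, assuming Tornado 5.0 or newer, where IOLoop.run_in_executor is available; the blocking function returns its result instead of writing to the response from the worker thread.

import time
import tornado.ioloop
import tornado.web

class IndexHandler(tornado.web.RequestHandler):
    async def get(self):
        loop = tornado.ioloop.IOLoop.current()
        # Run the blocking call in the default thread pool and await its result.
        result = await loop.run_in_executor(None, self.sleep)
        self.write("Hello You! " + result)

    def sleep(self):
        time.sleep(5)
        return "sleep OK"

if __name__ == "__main__":
    app = tornado.web.Application([
        (r"/", IndexHandler),
    ])
    app.listen(8000)
    tornado.ioloop.IOLoop.current().start()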
3. What can we do about this? Looking at WSGIContainer's code, we see:
class WSGIContainer(object):
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application

    def __call__(self, request):
        data = {}
        response = []

        def start_response(status, response_headers, exc_info=None):
            data["status"] = status
            data["headers"] = response_headers
            return response.append

        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")

        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        if status_code != 304:
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))

        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
To fix this, we change how app_response is obtained: the WSGI application is run in a thread and its result is fetched asynchronously (using yield):
import tornado
import tornado.gen
from tornado import escape
from tornado import httputil
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from typing import List, Tuple, Optional, Callable, Any, Dict, Type
from types import TracebackType

class WSGIContainer_With_Thread(WSGIContainer):
    @tornado.gen.coroutine
    def __call__(self, request):
        data = {}  # type: Dict[str, Any]
        response = []  # type: List[bytes]

        def start_response(
            status: str,
            headers: List[Tuple[str, str]],
            exc_info: Optional[
                Tuple[
                    "Optional[Type[BaseException]]",
                    Optional[BaseException],
                    Optional[TracebackType],
                ]
            ] = None,
        ) -> Callable[[bytes], Any]:
            data["status"] = status
            data["headers"] = headers
            return response.append

        loop = IOLoop.current()
        # modified here: run the WSGI application in a thread and yield its result
        app_response = yield loop.run_in_executor(
            None, self.wsgi_application,
            WSGIContainer.environ(request), start_response)
        # app_response = self.wsgi_application(
        #     WSGIContainer.environ(request), start_response
        # )
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            if hasattr(app_response, "close"):
                app_response.close()  # type: ignore
        if not data:
            raise Exception("WSGI app did not call start_response")

        status_code_str, reason = data["status"].split(" ", 1)
        status_code = int(status_code_str)
        headers = data["headers"]  # type: List[Tuple[str, str]]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        if status_code != 304:
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))

        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        assert request.connection is not None
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)

if __name__ == '__main__':
    # app is the Flask application from the first example
    http_server = HTTPServer(WSGIContainer_With_Thread(app))
    http_server.listen(5000)
    IOLoop.instance().start()
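With this container, repeating the two concurrent GET requests from the first example should show both responses arriving after roughly 5 seconds instead of 5 and 10. Note that run_in_executor(None, ...) uses the IOLoop's default thread pool; to bound how many WSGI requests run concurrently, an explicit executor can be installed. The following is a sketch of mine, assuming Tornado 5.0 or newer, where IOLoop.set_default_executor exists; the pool size of 10 is purely illustrative.

from concurrent.futures import ThreadPoolExecutor

if __name__ == '__main__':
    # Hypothetical variant: run_in_executor(None, ...) will now use this bounded pool.
    IOLoop.current().set_default_executor(ThreadPoolExecutor(max_workers=10))
    http_server = HTTPServer(WSGIContainer_With_Thread(app))
    http_server.listen(5000)
    IOLoop.current().start()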
Note:
1. This approach does not actually improve performance, since the WSGI application is still executed in threads. For real gains it is better to use Tornado's own web framework and write genuinely asynchronous code, so that Tornado's asynchronous IO is actually exploited. Also note that yield-based coroutines are no longer supported from Python 3.10 onward, so native async/await coroutines are preferable.
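As an illustration of what "real asynchronous code" means here, the following handler sketch (my addition, not from the original post) waits without blocking the IOLoop, so concurrent requests overlap with no worker threads at all:

import tornado.gen
import tornado.ioloop
import tornado.web

class IndexHandler(tornado.web.RequestHandler):
    async def get(self):
        # Non-blocking sleep: the IOLoop keeps serving other requests meanwhile.
        await tornado.gen.sleep(5)
        self.write('OK')

if __name__ == '__main__':
    app = tornado.web.Application([(r"/", IndexHandler)])
    app.listen(8000)
    tornado.ioloop.IOLoop.current().start()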