
Source Code for Module cherrypy.wsgiserver.wsgiserver3

   1  """A high-speed, production ready, thread pooled, generic HTTP server. 
   2   
   3  The simplest example of how to use this module directly 
   4  (without using CherryPy's application machinery):: 
   5   
   6      from cherrypy import wsgiserver 
   7   
   8      def my_crazy_app(environ, start_response): 
   9          status = '200 OK' 
  10          response_headers = [('Content-type','text/plain')] 
  11          start_response(status, response_headers) 
  12          return ['Hello world!'] 
  13   
  14      server = wsgiserver.CherryPyWSGIServer( 
  15                  ('0.0.0.0', 8070), my_crazy_app, 
  16                  server_name='www.cherrypy.example') 
  17      server.start() 
  18   
  19  The CherryPy WSGI server can serve as many WSGI applications 
  20  as you want in one instance by using a WSGIPathInfoDispatcher:: 
  21   
  22      d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app}) 
  23      server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d) 
  24   
  25  Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance. 
  26   
  27  This won't call the CherryPy engine (application side) at all, only the 
  28  HTTP server, which is independent from the rest of CherryPy. Don't 
  29  let the name "CherryPyWSGIServer" throw you; the name merely reflects 
  30  its origin, not its coupling. 
  31   
  32  For those of you wanting to understand the internals of this module, here's the 
  33  basic call flow. The server's listening thread runs a very tight loop, 
  34  sticking incoming connections onto a Queue:: 
  35   
  36      server = CherryPyWSGIServer(...) 
  37      server.start() 
  38      while True: 
  39          tick() 
  40          # This blocks until a request comes in: 
  41          child = socket.accept() 
  42          conn = HTTPConnection(child, ...) 
  43          server.requests.put(conn) 
  44   
  45  Worker threads are kept in a pool and poll the Queue, popping off and then 
  46  handling each connection in turn. Each connection can consist of an arbitrary 
  47  number of requests and their responses, so we run a nested loop:: 
  48   
  49      while True: 
  50          conn = server.requests.get() 
  51          conn.communicate() 
  52          ->  while True: 
  53                  req = HTTPRequest(...) 
  54                  req.parse_request() 
  55                  ->  # Read the Request-Line, e.g. "GET /page HTTP/1.1" 
  56                      req.rfile.readline() 
  57                      read_headers(req.rfile, req.inheaders) 
  58                  req.respond() 
  59                  ->  response = app(...) 
  60                      try: 
  61                          for chunk in response: 
  62                              if chunk: 
  63                                  req.write(chunk) 
  64                      finally: 
  65                          if hasattr(response, "close"): 
  66                              response.close() 
  67                  if req.close_connection: 
  68                      return 
  69  """ 
  70   
  71  __all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer', 
  72             'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile', 
  73             'CP_makefile', 
  74             'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert', 
  75             'WorkerThread', 'ThreadPool', 'SSLAdapter', 
  76             'CherryPyWSGIServer', 
  77             'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0', 
  78             'WSGIPathInfoDispatcher', 'get_ssl_adapter_class'] 
  79   
  80  import os 
  81  try: 
  82      import queue 
  83  except: 
  84      import Queue as queue 
  85  import re 
  86  import email.utils 
  87  import socket 
  88  import sys 
  89  if 'win' in sys.platform and hasattr(socket, "AF_INET6"): 
  90      if not hasattr(socket, 'IPPROTO_IPV6'): 
  91          socket.IPPROTO_IPV6 = 41 
  92      if not hasattr(socket, 'IPV6_V6ONLY'): 
  93          socket.IPV6_V6ONLY = 27 
  94  if sys.version_info < (3, 1): 
  95      import io 
  96  else: 
  97      import _pyio as io 
  98  DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE 
  99   
 100  import threading 
 101  import time 
 102  from traceback import format_exc 
 103   
 104  if sys.version_info >= (3, 0): 
 105      bytestr = bytes 
 106      unicodestr = str 
 107      basestring = (bytes, str) 
 108   
109 - def ntob(n, encoding='ISO-8859-1'):
110 """Return the given native string as a byte string in the given 111 encoding. 112 """ 113 # In Python 3, the native string type is unicode 114 return n.encode(encoding)
 115  else: 
 116      bytestr = str 
 117      unicodestr = unicode 
 118      basestring = basestring 
 119   
120 - def ntob(n, encoding='ISO-8859-1'):
121 """Return the given native string as a byte string in the given 122 encoding. 123 """ 124 # In Python 2, the native string type is bytes. Assume it's already 125 # in the given encoding, which for ISO-8859-1 is almost always what 126 # was intended. 127 return n
 128   
 129  LF = ntob('\n') 
 130  CRLF = ntob('\r\n') 
 131  TAB = ntob('\t') 
 132  SPACE = ntob(' ') 
 133  COLON = ntob(':') 
 134  SEMICOLON = ntob(';') 
 135  EMPTY = ntob('') 
 136  NUMBER_SIGN = ntob('#') 
 137  QUESTION_MARK = ntob('?') 
 138  ASTERISK = ntob('*') 
 139  FORWARD_SLASH = ntob('/') 
 140  quoted_slash = re.compile(ntob("(?i)%2F")) 
 141   
 142  import errno 
 143   
 144   
145 -def plat_specific_errors(*errnames):
146 """Return error numbers for all errors in errnames on this platform. 147 148 The 'errno' module contains different global constants depending on 149 the specific platform (OS). This function will return the list of 150 numeric values for a given list of potential names. 151 """ 152 errno_names = dir(errno) 153 nums = [getattr(errno, k) for k in errnames if k in errno_names] 154 # de-dupe the list 155 return list(dict.fromkeys(nums).keys())
 156   
 157  socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR") 
 158   
 159  socket_errors_to_ignore = plat_specific_errors( 
 160      "EPIPE", 
 161      "EBADF", "WSAEBADF", 
 162      "ENOTSOCK", "WSAENOTSOCK", 
 163      "ETIMEDOUT", "WSAETIMEDOUT", 
 164      "ECONNREFUSED", "WSAECONNREFUSED", 
 165      "ECONNRESET", "WSAECONNRESET", 
 166      "ECONNABORTED", "WSAECONNABORTED", 
 167      "ENETRESET", "WSAENETRESET", 
 168      "EHOSTDOWN", "EHOSTUNREACH", 
 169  ) 
 170  socket_errors_to_ignore.append("timed out") 
 171  socket_errors_to_ignore.append("The read operation timed out") 
 172   
 173  socket_errors_nonblocking = plat_specific_errors( 
 174      'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK') 
 175   
 176  comma_separated_headers = [ 
 177      ntob(h) for h in 
 178      ['Accept', 'Accept-Charset', 'Accept-Encoding', 
 179       'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 
 180       'Connection', 'Content-Encoding', 'Content-Language', 'Expect', 
 181       'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE', 
 182       'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 
 183       'WWW-Authenticate'] 
 184  ] 
 185   
 186   
 187  import logging 
 188  if not hasattr(logging, 'statistics'): 
 189      logging.statistics = {} 
 190   
 191   
192 -def read_headers(rfile, hdict=None):
193 """Read headers from the given stream into the given header dict. 194 195 If hdict is None, a new header dict is created. Returns the populated 196 header dict. 197 198 Headers which are repeated are folded together using a comma if their 199 specification so dictates. 200 201 This function raises ValueError when the read bytes violate the HTTP spec. 202 You should probably return "400 Bad Request" if this happens. 203 """ 204 if hdict is None: 205 hdict = {} 206 207 while True: 208 line = rfile.readline() 209 if not line: 210 # No more data--illegal end of headers 211 raise ValueError("Illegal end of headers.") 212 213 if line == CRLF: 214 # Normal end of headers 215 break 216 if not line.endswith(CRLF): 217 raise ValueError("HTTP requires CRLF terminators") 218 219 if line[0] in (SPACE, TAB): 220 # It's a continuation line. 221 v = line.strip() 222 else: 223 try: 224 k, v = line.split(COLON, 1) 225 except ValueError: 226 raise ValueError("Illegal header line.") 227 # TODO: what about TE and WWW-Authenticate? 228 k = k.strip().title() 229 v = v.strip() 230 hname = k 231 232 if k in comma_separated_headers: 233 existing = hdict.get(hname) 234 if existing: 235 v = b", ".join((existing, v)) 236 hdict[hname] = v 237 238 return hdict
239 240
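
# Illustrative sketch (not part of the original module): exercising
# read_headers() against an in-memory stream. The header names and values
# below are hypothetical examples, and the helper name is made up.
def _example_read_headers():
    import io
    raw = io.BytesIO(
        b"Host: www.example.com\r\n"
        b"Accept: text/html\r\n"
        b"Accept: application/json\r\n"
        b"\r\n")
    headers = read_headers(raw)
    # 'Accept' is listed in comma_separated_headers, so repeats are folded:
    assert headers[b'Accept'] == b'text/html, application/json'
    assert headers[b'Host'] == b'www.example.com'
    return headers
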
241 -class MaxSizeExceeded(Exception):
242 pass
243 244
245 -class SizeCheckWrapper(object):
246 247 """Wraps a file-like object, raising MaxSizeExceeded if too large.""" 248
249 - def __init__(self, rfile, maxlen):
 250          self.rfile = rfile 
 251          self.maxlen = maxlen 
 252          self.bytes_read = 0 
253
254 - def _check_length(self):
 255          if self.maxlen and self.bytes_read > self.maxlen: 
 256              raise MaxSizeExceeded() 
257
258 - def read(self, size=None):
 259          data = self.rfile.read(size) 
 260          self.bytes_read += len(data) 
 261          self._check_length() 
 262          return data 
263
264 - def readline(self, size=None):
 265          if size is not None: 
 266              data = self.rfile.readline(size) 
 267              self.bytes_read += len(data) 
 268              self._check_length() 
 269              return data 
 270   
 271          # User didn't specify a size ... 
 272          # We read the line in chunks to make sure it's not a 100MB line ! 
 273          res = [] 
 274          while True: 
 275              data = self.rfile.readline(256) 
 276              self.bytes_read += len(data) 
 277              self._check_length() 
 278              res.append(data) 
 279              # See https://bitbucket.org/cherrypy/cherrypy/issue/421 
 280              if len(data) < 256 or data[-1:] == LF: 
 281                  return EMPTY.join(res) 
282
283 - def readlines(self, sizehint=0):
 284          # Shamelessly stolen from StringIO 
 285          total = 0 
 286          lines = [] 
 287          line = self.readline() 
 288          while line: 
 289              lines.append(line) 
 290              total += len(line) 
 291              if 0 < sizehint <= total: 
 292                  break 
 293              line = self.readline() 
 294          return lines 
295
296 - def close(self):
297 self.rfile.close()
298
299 - def __iter__(self):
300 return self
301
302 - def __next__(self):
 303          data = next(self.rfile) 
 304          self.bytes_read += len(data) 
 305          self._check_length() 
 306          return data 
307
308 - def next(self):
 309          data = self.rfile.next() 
 310          self.bytes_read += len(data) 
 311          self._check_length() 
 312          return data 
313 314
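
# Illustrative sketch (not part of the original module): SizeCheckWrapper
# raising MaxSizeExceeded once more than maxlen bytes have been read.
# The helper name is made up for illustration.
def _example_size_check_wrapper():
    import io
    wrapped = SizeCheckWrapper(io.BytesIO(b"x" * 100), maxlen=10)
    try:
        wrapped.read(50)  # 50 bytes come back, then 50 > 10 is detected
    except MaxSizeExceeded:
        return "request entity too large"
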
315 -class KnownLengthRFile(object):
316 317 """Wraps a file-like object, returning an empty string when exhausted.""" 318
319 - def __init__(self, rfile, content_length):
 320          self.rfile = rfile 
 321          self.remaining = content_length 
322
323 - def read(self, size=None):
 324          if self.remaining == 0: 
 325              return b'' 
 326          if size is None: 
 327              size = self.remaining 
 328          else: 
 329              size = min(size, self.remaining) 
 330   
 331          data = self.rfile.read(size) 
 332          self.remaining -= len(data) 
 333          return data 
334
335 - def readline(self, size=None):
 336          if self.remaining == 0: 
 337              return b'' 
 338          if size is None: 
 339              size = self.remaining 
 340          else: 
 341              size = min(size, self.remaining) 
 342   
 343          data = self.rfile.readline(size) 
 344          self.remaining -= len(data) 
 345          return data 
346
347 - def readlines(self, sizehint=0):
 348          # Shamelessly stolen from StringIO 
 349          total = 0 
 350          lines = [] 
 351          line = self.readline(sizehint) 
 352          while line: 
 353              lines.append(line) 
 354              total += len(line) 
 355              if 0 < sizehint <= total: 
 356                  break 
 357              line = self.readline(sizehint) 
 358          return lines 
359
360 - def close(self):
361 self.rfile.close()
362
363 - def __iter__(self):
364 return self
365
366 - def __next__(self):
 367          data = next(self.rfile) 
 368          self.remaining -= len(data) 
 369          return data 
370 371
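
# Illustrative sketch (not part of the original module): KnownLengthRFile
# stops at Content-Length even if more bytes follow on the stream.
# The helper name and data are made up for illustration.
def _example_known_length_rfile():
    import io
    rfile = KnownLengthRFile(io.BytesIO(b"hello worldEXTRA"), content_length=11)
    assert rfile.read() == b"hello world"
    assert rfile.read() == b""   # exhausted; further reads return b''
    return rfile
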
372 -class ChunkedRFile(object):
373 374 """Wraps a file-like object, returning an empty string when exhausted. 375 376 This class is intended to provide a conforming wsgi.input value for 377 request entities that have been encoded with the 'chunked' transfer 378 encoding. 379 """ 380
381 - def __init__(self, rfile, maxlen, bufsize=8192):
 382          self.rfile = rfile 
 383          self.maxlen = maxlen 
 384          self.bytes_read = 0 
 385          self.buffer = EMPTY 
 386          self.bufsize = bufsize 
 387          self.closed = False 
388
389 - def _fetch(self):
 390          if self.closed: 
 391              return 
 392   
 393          line = self.rfile.readline() 
 394          self.bytes_read += len(line) 
 395   
 396          if self.maxlen and self.bytes_read > self.maxlen: 
 397              raise MaxSizeExceeded("Request Entity Too Large", self.maxlen) 
 398   
 399          line = line.strip().split(SEMICOLON, 1) 
 400   
 401          try: 
 402              chunk_size = line.pop(0) 
 403              chunk_size = int(chunk_size, 16) 
 404          except ValueError: 
 405              raise ValueError("Bad chunked transfer size: " + repr(chunk_size)) 
 406   
 407          if chunk_size <= 0: 
 408              self.closed = True 
 409              return 
 410   
 411          ## if line: chunk_extension = line[0] 
 412   
 413          if self.maxlen and self.bytes_read + chunk_size > self.maxlen: 
 414              raise IOError("Request Entity Too Large") 
 415   
 416          chunk = self.rfile.read(chunk_size) 
 417          self.bytes_read += len(chunk) 
 418          self.buffer += chunk 
 419   
 420          crlf = self.rfile.read(2) 
 421          if crlf != CRLF: 
 422              raise ValueError( 
 423                  "Bad chunked transfer coding (expected '\\r\\n', " 
 424                  "got " + repr(crlf) + ")") 
425
426 - def read(self, size=None):
 427          data = EMPTY 
 428          while True: 
 429              if size and len(data) >= size: 
 430                  return data 
 431   
 432              if not self.buffer: 
 433                  self._fetch() 
 434                  if not self.buffer: 
 435                      # EOF 
 436                      return data 
 437   
 438              if size: 
 439                  remaining = size - len(data) 
 440                  data += self.buffer[:remaining] 
 441                  self.buffer = self.buffer[remaining:] 
 442              else: 
 443                  data += self.buffer 
444
445 - def readline(self, size=None):
 446          data = EMPTY 
 447          while True: 
 448              if size and len(data) >= size: 
 449                  return data 
 450   
 451              if not self.buffer: 
 452                  self._fetch() 
 453                  if not self.buffer: 
 454                      # EOF 
 455                      return data 
 456   
 457              newline_pos = self.buffer.find(LF) 
 458              if size: 
 459                  if newline_pos == -1: 
 460                      remaining = size - len(data) 
 461                      data += self.buffer[:remaining] 
 462                      self.buffer = self.buffer[remaining:] 
 463                  else: 
 464                      remaining = min(size - len(data), newline_pos) 
 465                      data += self.buffer[:remaining] 
 466                      self.buffer = self.buffer[remaining:] 
 467              else: 
 468                  if newline_pos == -1: 
 469                      data += self.buffer 
 470                  else: 
 471                      data += self.buffer[:newline_pos] 
 472                      self.buffer = self.buffer[newline_pos:] 
473
474 - def readlines(self, sizehint=0):
 475          # Shamelessly stolen from StringIO 
 476          total = 0 
 477          lines = [] 
 478          line = self.readline(sizehint) 
 479          while line: 
 480              lines.append(line) 
 481              total += len(line) 
 482              if 0 < sizehint <= total: 
 483                  break 
 484              line = self.readline(sizehint) 
 485          return lines 
486
487 - def read_trailer_lines(self):
 488          if not self.closed: 
 489              raise ValueError( 
 490                  "Cannot read trailers until the request body has been read.") 
 491   
 492          while True: 
 493              line = self.rfile.readline() 
 494              if not line: 
 495                  # No more data--illegal end of headers 
 496                  raise ValueError("Illegal end of headers.") 
 497   
 498              self.bytes_read += len(line) 
 499              if self.maxlen and self.bytes_read > self.maxlen: 
 500                  raise IOError("Request Entity Too Large") 
 501   
 502              if line == CRLF: 
 503                  # Normal end of headers 
 504                  break 
 505              if not line.endswith(CRLF): 
 506                  raise ValueError("HTTP requires CRLF terminators") 
 507   
 508              yield line 
509
510 - def close(self):
511 self.rfile.close()
512
513 - def __iter__(self):
 514          # Shamelessly stolen from StringIO 
 515          total = 0 
 516          line = self.readline(sizehint) 
 517          while line: 
 518              yield line 
 519              total += len(line) 
 520              if 0 < sizehint <= total: 
 521                  break 
 522              line = self.readline(sizehint) 
523 524
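
# Illustrative sketch (not part of the original module): decoding a chunked
# request body with ChunkedRFile. The body is a hypothetical example and the
# helper name is made up. Note that an explicit size is passed to read().
def _example_chunked_rfile():
    import io
    body = io.BytesIO(b"5\r\nhello\r\n6\r\n world\r\n0\r\n\r\n")
    rfile = ChunkedRFile(body, maxlen=0)
    data = rfile.read(11)                # chunk sizes ("5", "6") are hex-encoded
    assert data == b"hello world"
    assert rfile.read(1) == b""          # consumes the terminating "0" chunk
    assert list(rfile.read_trailer_lines()) == []
    return data
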
525 -class HTTPRequest(object):
526 527 """An HTTP Request (and response). 528 529 A single HTTP connection may consist of multiple request/response pairs. 530 """ 531 532 server = None 533 """The HTTPServer object which is receiving this request.""" 534 535 conn = None 536 """The HTTPConnection object on which this request connected.""" 537 538 inheaders = {} 539 """A dict of request headers.""" 540 541 outheaders = [] 542 """A list of header tuples to write in the response.""" 543 544 ready = False 545 """When True, the request has been parsed and is ready to begin generating 546 the response. When False, signals the calling Connection that the response 547 should not be generated and the connection should close.""" 548 549 close_connection = False 550 """Signals the calling Connection that the request should close. This does 551 not imply an error! The client and/or server may each request that the 552 connection be closed.""" 553 554 chunked_write = False 555 """If True, output will be encoded with the "chunked" transfer-coding. 556 557 This value is set automatically inside send_headers.""" 558
559 - def __init__(self, server, conn):
560 self.server = server 561 self.conn = conn 562 563 self.ready = False 564 self.started_request = False 565 self.scheme = ntob("http") 566 if self.server.ssl_adapter is not None: 567 self.scheme = ntob("https") 568 # Use the lowest-common protocol in case read_request_line errors. 569 self.response_protocol = 'HTTP/1.0' 570 self.inheaders = {} 571 572 self.status = "" 573 self.outheaders = [] 574 self.sent_headers = False 575 self.close_connection = self.__class__.close_connection 576 self.chunked_read = False 577 self.chunked_write = self.__class__.chunked_write
578
579 - def parse_request(self):
580 """Parse the next HTTP request start-line and message-headers.""" 581 self.rfile = SizeCheckWrapper(self.conn.rfile, 582 self.server.max_request_header_size) 583 try: 584 success = self.read_request_line() 585 except MaxSizeExceeded: 586 self.simple_response( 587 "414 Request-URI Too Long", 588 "The Request-URI sent with the request exceeds the maximum " 589 "allowed bytes.") 590 return 591 else: 592 if not success: 593 return 594 595 try: 596 success = self.read_request_headers() 597 except MaxSizeExceeded: 598 self.simple_response( 599 "413 Request Entity Too Large", 600 "The headers sent with the request exceed the maximum " 601 "allowed bytes.") 602 return 603 else: 604 if not success: 605 return 606 607 self.ready = True
608
609 - def read_request_line(self):
610 # HTTP/1.1 connections are persistent by default. If a client 611 # requests a page, then idles (leaves the connection open), 612 # then rfile.readline() will raise socket.error("timed out"). 613 # Note that it does this based on the value given to settimeout(), 614 # and doesn't need the client to request or acknowledge the close 615 # (although your TCP stack might suffer for it: cf Apache's history 616 # with FIN_WAIT_2). 617 request_line = self.rfile.readline() 618 619 # Set started_request to True so communicate() knows to send 408 620 # from here on out. 621 self.started_request = True 622 if not request_line: 623 return False 624 625 if request_line == CRLF: 626 # RFC 2616 sec 4.1: "...if the server is reading the protocol 627 # stream at the beginning of a message and receives a CRLF 628 # first, it should ignore the CRLF." 629 # But only ignore one leading line! else we enable a DoS. 630 request_line = self.rfile.readline() 631 if not request_line: 632 return False 633 634 if not request_line.endswith(CRLF): 635 self.simple_response( 636 "400 Bad Request", "HTTP requires CRLF terminators") 637 return False 638 639 try: 640 method, uri, req_protocol = request_line.strip().split(SPACE, 2) 641 # The [x:y] slicing is necessary for byte strings to avoid getting 642 # ord's 643 rp = int(req_protocol[5:6]), int(req_protocol[7:8]) 644 except ValueError: 645 self.simple_response("400 Bad Request", "Malformed Request-Line") 646 return False 647 648 self.uri = uri 649 self.method = method 650 651 # uri may be an abs_path (including "http://host.domain.tld"); 652 scheme, authority, path = self.parse_request_uri(uri) 653 if NUMBER_SIGN in path: 654 self.simple_response("400 Bad Request", 655 "Illegal #fragment in Request-URI.") 656 return False 657 658 if scheme: 659 self.scheme = scheme 660 661 qs = EMPTY 662 if QUESTION_MARK in path: 663 path, qs = path.split(QUESTION_MARK, 1) 664 665 # Unquote the path+params (e.g. "/this%20path" -> "/this path"). 666 # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2 667 # 668 # But note that "...a URI must be separated into its components 669 # before the escaped characters within those components can be 670 # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2 671 # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path". 672 try: 673 atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)] 674 except ValueError: 675 ex = sys.exc_info()[1] 676 self.simple_response("400 Bad Request", ex.args[0]) 677 return False 678 path = b"%2F".join(atoms) 679 self.path = path 680 681 # Note that, like wsgiref and most other HTTP servers, 682 # we "% HEX HEX"-unquote the path but not the query string. 683 self.qs = qs 684 685 # Compare request and server HTTP protocol versions, in case our 686 # server does not support the requested protocol. Limit our output 687 # to min(req, server). We want the following output: 688 # request server actual written supported response 689 # protocol protocol response protocol feature set 690 # a 1.0 1.0 1.0 1.0 691 # b 1.0 1.1 1.1 1.0 692 # c 1.1 1.0 1.0 1.0 693 # d 1.1 1.1 1.1 1.1 694 # Notice that, in (b), the response will be "HTTP/1.1" even though 695 # the client only understands 1.0. RFC 2616 10.5.6 says we should 696 # only return 505 if the _major_ version is different. 
697 # The [x:y] slicing is necessary for byte strings to avoid getting 698 # ord's 699 sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8]) 700 701 if sp[0] != rp[0]: 702 self.simple_response("505 HTTP Version Not Supported") 703 return False 704 705 self.request_protocol = req_protocol 706 self.response_protocol = "HTTP/%s.%s" % min(rp, sp) 707 return True
708
709 - def read_request_headers(self):
710 """Read self.rfile into self.inheaders. Return success.""" 711 712 # then all the http headers 713 try: 714 read_headers(self.rfile, self.inheaders) 715 except ValueError: 716 ex = sys.exc_info()[1] 717 self.simple_response("400 Bad Request", ex.args[0]) 718 return False 719 720 mrbs = self.server.max_request_body_size 721 if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs: 722 self.simple_response( 723 "413 Request Entity Too Large", 724 "The entity sent with the request exceeds the maximum " 725 "allowed bytes.") 726 return False 727 728 # Persistent connection support 729 if self.response_protocol == "HTTP/1.1": 730 # Both server and client are HTTP/1.1 731 if self.inheaders.get(b"Connection", b"") == b"close": 732 self.close_connection = True 733 else: 734 # Either the server or client (or both) are HTTP/1.0 735 if self.inheaders.get(b"Connection", b"") != b"Keep-Alive": 736 self.close_connection = True 737 738 # Transfer-Encoding support 739 te = None 740 if self.response_protocol == "HTTP/1.1": 741 te = self.inheaders.get(b"Transfer-Encoding") 742 if te: 743 te = [x.strip().lower() for x in te.split(b",") if x.strip()] 744 745 self.chunked_read = False 746 747 if te: 748 for enc in te: 749 if enc == b"chunked": 750 self.chunked_read = True 751 else: 752 # Note that, even if we see "chunked", we must reject 753 # if there is an extension we don't recognize. 754 self.simple_response("501 Unimplemented") 755 self.close_connection = True 756 return False 757 758 # From PEP 333: 759 # "Servers and gateways that implement HTTP 1.1 must provide 760 # transparent support for HTTP 1.1's "expect/continue" mechanism. 761 # This may be done in any of several ways: 762 # 1. Respond to requests containing an Expect: 100-continue request 763 # with an immediate "100 Continue" response, and proceed normally. 764 # 2. Proceed with the request normally, but provide the application 765 # with a wsgi.input stream that will send the "100 Continue" 766 # response if/when the application first attempts to read from 767 # the input stream. The read request must then remain blocked 768 # until the client responds. 769 # 3. Wait until the client decides that the server does not support 770 # expect/continue, and sends the request body on its own. 771 # (This is suboptimal, and is not recommended.) 772 # 773 # We used to do 3, but are now doing 1. Maybe we'll do 2 someday, 774 # but it seems like it would be a big slowdown for such a rare case. 775 if self.inheaders.get(b"Expect", b"") == b"100-continue": 776 # Don't use simple_response here, because it emits headers 777 # we don't want. See 778 # https://bitbucket.org/cherrypy/cherrypy/issue/951 779 msg = self.server.protocol.encode( 780 'ascii') + b" 100 Continue\r\n\r\n" 781 try: 782 self.conn.wfile.write(msg) 783 except socket.error: 784 x = sys.exc_info()[1] 785 if x.args[0] not in socket_errors_to_ignore: 786 raise 787 return True
788
789 - def parse_request_uri(self, uri):
790 """Parse a Request-URI into (scheme, authority, path). 791 792 Note that Request-URI's must be one of:: 793 794 Request-URI = "*" | absoluteURI | abs_path | authority 795 796 Therefore, a Request-URI which starts with a double forward-slash 797 cannot be a "net_path":: 798 799 net_path = "//" authority [ abs_path ] 800 801 Instead, it must be interpreted as an "abs_path" with an empty first 802 path segment:: 803 804 abs_path = "/" path_segments 805 path_segments = segment *( "/" segment ) 806 segment = *pchar *( ";" param ) 807 param = *pchar 808 """ 809 if uri == ASTERISK: 810 return None, None, uri 811 812 scheme, sep, remainder = uri.partition(b'://') 813 if sep and QUESTION_MARK not in scheme: 814 # An absoluteURI. 815 # If there's a scheme (and it must be http or https), then: 816 # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query 817 # ]] 818 authority, path_a, path_b = remainder.partition(FORWARD_SLASH) 819 return scheme.lower(), authority, path_a + path_b 820 821 if uri.startswith(FORWARD_SLASH): 822 # An abs_path. 823 return None, None, uri 824 else: 825 # An authority. 826 return None, uri, None
827
828 - def unquote_bytes(self, path):
829 """takes quoted string and unquotes % encoded values""" 830 res = path.split(b'%') 831 832 for i in range(1, len(res)): 833 item = res[i] 834 try: 835 res[i] = bytes([int(item[:2], 16)]) + item[2:] 836 except ValueError: 837 raise 838 return b''.join(res)
839
840 - def respond(self):
841 """Call the gateway and write its iterable output.""" 842 mrbs = self.server.max_request_body_size 843 if self.chunked_read: 844 self.rfile = ChunkedRFile(self.conn.rfile, mrbs) 845 else: 846 cl = int(self.inheaders.get(b"Content-Length", 0)) 847 if mrbs and mrbs < cl: 848 if not self.sent_headers: 849 self.simple_response( 850 "413 Request Entity Too Large", 851 "The entity sent with the request exceeds the " 852 "maximum allowed bytes.") 853 return 854 self.rfile = KnownLengthRFile(self.conn.rfile, cl) 855 856 self.server.gateway(self).respond() 857 858 if (self.ready and not self.sent_headers): 859 self.sent_headers = True 860 self.send_headers() 861 if self.chunked_write: 862 self.conn.wfile.write(b"0\r\n\r\n")
863
864 - def simple_response(self, status, msg=""):
865 """Write a simple response back to the client.""" 866 status = str(status) 867 buf = [bytes(self.server.protocol, "ascii") + SPACE + 868 bytes(status, "ISO-8859-1") + CRLF, 869 bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"), 870 b"Content-Type: text/plain\r\n"] 871 872 if status[:3] in ("413", "414"): 873 # Request Entity Too Large / Request-URI Too Long 874 self.close_connection = True 875 if self.response_protocol == 'HTTP/1.1': 876 # This will not be true for 414, since read_request_line 877 # usually raises 414 before reading the whole line, and we 878 # therefore cannot know the proper response_protocol. 879 buf.append(b"Connection: close\r\n") 880 else: 881 # HTTP/1.0 had no 413/414 status nor Connection header. 882 # Emit 400 instead and trust the message body is enough. 883 status = "400 Bad Request" 884 885 buf.append(CRLF) 886 if msg: 887 if isinstance(msg, unicodestr): 888 msg = msg.encode("ISO-8859-1") 889 buf.append(msg) 890 891 try: 892 self.conn.wfile.write(b"".join(buf)) 893 except socket.error: 894 x = sys.exc_info()[1] 895 if x.args[0] not in socket_errors_to_ignore: 896 raise
897
898 - def write(self, chunk):
899 """Write unbuffered data to the client.""" 900 if self.chunked_write and chunk: 901 buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF] 902 self.conn.wfile.write(EMPTY.join(buf)) 903 else: 904 self.conn.wfile.write(chunk)
905
906 - def send_headers(self):
907 """Assert, process, and send the HTTP response message-headers. 908 909 You must set self.status, and self.outheaders before calling this. 910 """ 911 hkeys = [key.lower() for key, value in self.outheaders] 912 status = int(self.status[:3]) 913 914 if status == 413: 915 # Request Entity Too Large. Close conn to avoid garbage. 916 self.close_connection = True 917 elif b"content-length" not in hkeys: 918 # "All 1xx (informational), 204 (no content), 919 # and 304 (not modified) responses MUST NOT 920 # include a message-body." So no point chunking. 921 if status < 200 or status in (204, 205, 304): 922 pass 923 else: 924 if (self.response_protocol == 'HTTP/1.1' 925 and self.method != b'HEAD'): 926 # Use the chunked transfer-coding 927 self.chunked_write = True 928 self.outheaders.append((b"Transfer-Encoding", b"chunked")) 929 else: 930 # Closing the conn is the only way to determine len. 931 self.close_connection = True 932 933 if b"connection" not in hkeys: 934 if self.response_protocol == 'HTTP/1.1': 935 # Both server and client are HTTP/1.1 or better 936 if self.close_connection: 937 self.outheaders.append((b"Connection", b"close")) 938 else: 939 # Server and/or client are HTTP/1.0 940 if not self.close_connection: 941 self.outheaders.append((b"Connection", b"Keep-Alive")) 942 943 if (not self.close_connection) and (not self.chunked_read): 944 # Read any remaining request body data on the socket. 945 # "If an origin server receives a request that does not include an 946 # Expect request-header field with the "100-continue" expectation, 947 # the request includes a request body, and the server responds 948 # with a final status code before reading the entire request body 949 # from the transport connection, then the server SHOULD NOT close 950 # the transport connection until it has read the entire request, 951 # or until the client closes the connection. Otherwise, the client 952 # might not reliably receive the response message. However, this 953 # requirement is not be construed as preventing a server from 954 # defending itself against denial-of-service attacks, or from 955 # badly broken client implementations." 956 remaining = getattr(self.rfile, 'remaining', 0) 957 if remaining > 0: 958 self.rfile.read(remaining) 959 960 if b"date" not in hkeys: 961 self.outheaders.append(( 962 b"Date", 963 email.utils.formatdate(usegmt=True).encode('ISO-8859-1') 964 )) 965 966 if b"server" not in hkeys: 967 self.outheaders.append( 968 (b"Server", self.server.server_name.encode('ISO-8859-1'))) 969 970 buf = [self.server.protocol.encode( 971 'ascii') + SPACE + self.status + CRLF] 972 for k, v in self.outheaders: 973 buf.append(k + COLON + SPACE + v + CRLF) 974 buf.append(CRLF) 975 self.conn.wfile.write(EMPTY.join(buf))
976 977
978 -class NoSSLError(Exception):
979 980 """Exception raised when a client speaks HTTP to an HTTPS socket.""" 981 pass
982 983
984 -class FatalSSLAlert(Exception):
985 986 """Exception raised when the SSL implementation signals a fatal alert.""" 987 pass
988 989
990 -class CP_BufferedWriter(io.BufferedWriter):
991 992 """Faux file object attached to a socket object.""" 993
994 - def write(self, b):
 995          self._checkClosed() 
 996          if isinstance(b, str): 
 997              raise TypeError("can't write str to binary stream") 
 998   
 999          with self._write_lock: 
1000              self._write_buf.extend(b) 
1001              self._flush_unlocked() 
1002              return len(b) 
1003
1004 - def _flush_unlocked(self):
1005          self._checkClosed("flush of closed file") 
1006          while self._write_buf: 
1007              try: 
1008                  # ssl sockets only accept 'bytes', not bytearrays, 
1009                  # so perhaps we should conditionally wrap this for perf? 
1010                  n = self.raw.write(bytes(self._write_buf)) 
1011              except io.BlockingIOError as e: 
1012                  n = e.characters_written 
1013              del self._write_buf[:n] 
1014 1015
1016 -def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
1017      if 'r' in mode: 
1018          return io.BufferedReader(socket.SocketIO(sock, mode), bufsize) 
1019      else: 
1020          return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize) 
1021 1022
1023 -class HTTPConnection(object):
1024 1025 """An HTTP connection (active socket). 1026 1027 server: the Server object which received this connection. 1028 socket: the raw socket object (usually TCP) for this connection. 1029 makefile: a fileobject class for reading from the socket. 1030 """ 1031 1032 remote_addr = None 1033 remote_port = None 1034 ssl_env = None 1035 rbufsize = DEFAULT_BUFFER_SIZE 1036 wbufsize = DEFAULT_BUFFER_SIZE 1037 RequestHandlerClass = HTTPRequest 1038
1039 - def __init__(self, server, sock, makefile=CP_makefile):
1040 self.server = server 1041 self.socket = sock 1042 self.rfile = makefile(sock, "rb", self.rbufsize) 1043 self.wfile = makefile(sock, "wb", self.wbufsize) 1044 self.requests_seen = 0
1045
1046 - def communicate(self):
1047 """Read each request and respond appropriately.""" 1048 request_seen = False 1049 try: 1050 while True: 1051 # (re)set req to None so that if something goes wrong in 1052 # the RequestHandlerClass constructor, the error doesn't 1053 # get written to the previous request. 1054 req = None 1055 req = self.RequestHandlerClass(self.server, self) 1056 1057 # This order of operations should guarantee correct pipelining. 1058 req.parse_request() 1059 if self.server.stats['Enabled']: 1060 self.requests_seen += 1 1061 if not req.ready: 1062 # Something went wrong in the parsing (and the server has 1063 # probably already made a simple_response). Return and 1064 # let the conn close. 1065 return 1066 1067 request_seen = True 1068 req.respond() 1069 if req.close_connection: 1070 return 1071 except socket.error: 1072 e = sys.exc_info()[1] 1073 errnum = e.args[0] 1074 # sadly SSL sockets return a different (longer) time out string 1075 if ( 1076 errnum == 'timed out' or 1077 errnum == 'The read operation timed out' 1078 ): 1079 # Don't error if we're between requests; only error 1080 # if 1) no request has been started at all, or 2) we're 1081 # in the middle of a request. 1082 # See https://bitbucket.org/cherrypy/cherrypy/issue/853 1083 if (not request_seen) or (req and req.started_request): 1084 # Don't bother writing the 408 if the response 1085 # has already started being written. 1086 if req and not req.sent_headers: 1087 try: 1088 req.simple_response("408 Request Timeout") 1089 except FatalSSLAlert: 1090 # Close the connection. 1091 return 1092 elif errnum not in socket_errors_to_ignore: 1093 self.server.error_log("socket.error %s" % repr(errnum), 1094 level=logging.WARNING, traceback=True) 1095 if req and not req.sent_headers: 1096 try: 1097 req.simple_response("500 Internal Server Error") 1098 except FatalSSLAlert: 1099 # Close the connection. 1100 return 1101 return 1102 except (KeyboardInterrupt, SystemExit): 1103 raise 1104 except FatalSSLAlert: 1105 # Close the connection. 1106 return 1107 except NoSSLError: 1108 if req and not req.sent_headers: 1109 # Unwrap our wfile 1110 self.wfile = CP_makefile( 1111 self.socket._sock, "wb", self.wbufsize) 1112 req.simple_response( 1113 "400 Bad Request", 1114 "The client sent a plain HTTP request, but this server " 1115 "only speaks HTTPS on this port.") 1116 self.linger = True 1117 except Exception: 1118 e = sys.exc_info()[1] 1119 self.server.error_log(repr(e), level=logging.ERROR, traceback=True) 1120 if req and not req.sent_headers: 1121 try: 1122 req.simple_response("500 Internal Server Error") 1123 except FatalSSLAlert: 1124 # Close the connection. 1125 return
1126 1127 linger = False 1128
1129 - def close(self):
1130 """Close the socket underlying this connection.""" 1131 self.rfile.close() 1132 1133 if not self.linger: 1134 # Python's socket module does NOT call close on the kernel 1135 # socket when you call socket.close(). We do so manually here 1136 # because we want this server to send a FIN TCP segment 1137 # immediately. Note this must be called *before* calling 1138 # socket.close(), because the latter drops its reference to 1139 # the kernel socket. 1140 # Python 3 *probably* fixed this with socket._real_close; 1141 # hard to tell. 1142 # self.socket._sock.close() 1143 self.socket.close() 1144 else: 1145 # On the other hand, sometimes we want to hang around for a bit 1146 # to make sure the client has a chance to read our entire 1147 # response. Skipping the close() calls here delays the FIN 1148 # packet until the socket object is garbage-collected later. 1149 # Someday, perhaps, we'll do the full lingering_close that 1150 # Apache does, but not today. 1151 pass
1152 1153
1154 -class TrueyZero(object):
1155 1156 """An object which equals and does math like the integer 0 but evals True. 1157 """ 1158
1159 - def __add__(self, other):
1160 return other
1161
1162 - def __radd__(self, other):
1163 return other
1164 trueyzero = TrueyZero() 1165 1166 1167 _SHUTDOWNREQUEST = None 1168 1169
1170 -class WorkerThread(threading.Thread):
1171 1172 """Thread which continuously polls a Queue for Connection objects. 1173 1174 Due to the timing issues of polling a Queue, a WorkerThread does not 1175 check its own 'ready' flag after it has started. To stop the thread, 1176 it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue 1177 (one for each running WorkerThread). 1178 """ 1179 1180 conn = None 1181 """The current connection pulled off the Queue, or None.""" 1182 1183 server = None 1184 """The HTTP Server which spawned this thread, and which owns the 1185 Queue and is placing active connections into it.""" 1186 1187 ready = False 1188 """A simple flag for the calling server to know when this thread 1189 has begun polling the Queue.""" 1190
1191 - def __init__(self, server):
1192 self.ready = False 1193 self.server = server 1194 1195 self.requests_seen = 0 1196 self.bytes_read = 0 1197 self.bytes_written = 0 1198 self.start_time = None 1199 self.work_time = 0 1200 self.stats = { 1201 'Requests': lambda s: self.requests_seen + ( 1202 (self.start_time is None) and 1203 trueyzero or 1204 self.conn.requests_seen 1205 ), 1206 'Bytes Read': lambda s: self.bytes_read + ( 1207 (self.start_time is None) and 1208 trueyzero or 1209 self.conn.rfile.bytes_read 1210 ), 1211 'Bytes Written': lambda s: self.bytes_written + ( 1212 (self.start_time is None) and 1213 trueyzero or 1214 self.conn.wfile.bytes_written 1215 ), 1216 'Work Time': lambda s: self.work_time + ( 1217 (self.start_time is None) and 1218 trueyzero or 1219 time.time() - self.start_time 1220 ), 1221 'Read Throughput': lambda s: s['Bytes Read'](s) / ( 1222 s['Work Time'](s) or 1e-6), 1223 'Write Throughput': lambda s: s['Bytes Written'](s) / ( 1224 s['Work Time'](s) or 1e-6), 1225 } 1226 threading.Thread.__init__(self)
1227
1228 - def run(self):
1229 self.server.stats['Worker Threads'][self.getName()] = self.stats 1230 try: 1231 self.ready = True 1232 while True: 1233 conn = self.server.requests.get() 1234 if conn is _SHUTDOWNREQUEST: 1235 return 1236 1237 self.conn = conn 1238 if self.server.stats['Enabled']: 1239 self.start_time = time.time() 1240 try: 1241 conn.communicate() 1242 finally: 1243 conn.close() 1244 if self.server.stats['Enabled']: 1245 self.requests_seen += self.conn.requests_seen 1246 self.bytes_read += self.conn.rfile.bytes_read 1247 self.bytes_written += self.conn.wfile.bytes_written 1248 self.work_time += time.time() - self.start_time 1249 self.start_time = None 1250 self.conn = None 1251 except (KeyboardInterrupt, SystemExit): 1252 exc = sys.exc_info()[1] 1253 self.server.interrupt = exc
1254 1255
1256 -class ThreadPool(object):
1257 1258 """A Request Queue for an HTTPServer which pools threads. 1259 1260 ThreadPool objects must provide min, get(), put(obj), start() 1261 and stop(timeout) attributes. 1262 """ 1263
1264 - def __init__(self, server, min=10, max=-1):
1265 self.server = server 1266 self.min = min 1267 self.max = max 1268 self._threads = [] 1269 self._queue = queue.Queue() 1270 self.get = self._queue.get
1271
1272 - def start(self):
1273 """Start the pool of threads.""" 1274 for i in range(self.min): 1275 self._threads.append(WorkerThread(self.server)) 1276 for worker in self._threads: 1277 worker.setName("CP Server " + worker.getName()) 1278 worker.start() 1279 for worker in self._threads: 1280 while not worker.ready: 1281 time.sleep(.1)
1282
1283 - def _get_idle(self):
1284 """Number of worker threads which are idle. Read-only.""" 1285 return len([t for t in self._threads if t.conn is None])
1286 idle = property(_get_idle, doc=_get_idle.__doc__) 1287
1288 - def put(self, obj):
1289 self._queue.put(obj) 1290 if obj is _SHUTDOWNREQUEST: 1291 return
1292
1293 - def grow(self, amount):
1294 """Spawn new worker threads (not above self.max).""" 1295 if self.max > 0: 1296 budget = max(self.max - len(self._threads), 0) 1297 else: 1298 # self.max <= 0 indicates no maximum 1299 budget = float('inf') 1300 1301 n_new = min(amount, budget) 1302 1303 workers = [self._spawn_worker() for i in range(n_new)] 1304 while not all(worker.ready for worker in workers): 1305 time.sleep(.1) 1306 self._threads.extend(workers)
1307
1308 - def _spawn_worker(self):
1309 worker = WorkerThread(self.server) 1310 worker.setName("CP Server " + worker.getName()) 1311 worker.start() 1312 return worker
1313
1314 - def shrink(self, amount):
1315 """Kill off worker threads (not below self.min).""" 1316 # Grow/shrink the pool if necessary. 1317 # Remove any dead threads from our list 1318 for t in self._threads: 1319 if not t.isAlive(): 1320 self._threads.remove(t) 1321 amount -= 1 1322 1323 # calculate the number of threads above the minimum 1324 n_extra = max(len(self._threads) - self.min, 0) 1325 1326 # don't remove more than amount 1327 n_to_remove = min(amount, n_extra) 1328 1329 # put shutdown requests on the queue equal to the number of threads 1330 # to remove. As each request is processed by a worker, that worker 1331 # will terminate and be culled from the list. 1332 for n in range(n_to_remove): 1333 self._queue.put(_SHUTDOWNREQUEST)
1334
1335 - def stop(self, timeout=5):
1336 # Must shut down threads here so the code that calls 1337 # this method can know when all threads are stopped. 1338 for worker in self._threads: 1339 self._queue.put(_SHUTDOWNREQUEST) 1340 1341 # Don't join currentThread (when stop is called inside a request). 1342 current = threading.currentThread() 1343 if timeout and timeout >= 0: 1344 endtime = time.time() + timeout 1345 while self._threads: 1346 worker = self._threads.pop() 1347 if worker is not current and worker.isAlive(): 1348 try: 1349 if timeout is None or timeout < 0: 1350 worker.join() 1351 else: 1352 remaining_time = endtime - time.time() 1353 if remaining_time > 0: 1354 worker.join(remaining_time) 1355 if worker.isAlive(): 1356 # We exhausted the timeout. 1357 # Forcibly shut down the socket. 1358 c = worker.conn 1359 if c and not c.rfile.closed: 1360 try: 1361 c.socket.shutdown(socket.SHUT_RD) 1362 except TypeError: 1363 # pyOpenSSL sockets don't take an arg 1364 c.socket.shutdown() 1365 worker.join() 1366 except (AssertionError, 1367 # Ignore repeated Ctrl-C. 1368 # See 1369 # https://bitbucket.org/cherrypy/cherrypy/issue/691. 1370 KeyboardInterrupt): 1371 pass
1372
1373 - def _get_qsize(self):
1374 return self._queue.qsize()
1375 qsize = property(_get_qsize)
1376 1377 1378 try: 1379 import fcntl 1380 except ImportError: 1381 try: 1382 from ctypes import windll, WinError 1383 import ctypes.wintypes 1384 _SetHandleInformation = windll.kernel32.SetHandleInformation 1385 _SetHandleInformation.argtypes = [ 1386 ctypes.wintypes.HANDLE, 1387 ctypes.wintypes.DWORD, 1388 ctypes.wintypes.DWORD, 1389 ] 1390 _SetHandleInformation.restype = ctypes.wintypes.BOOL 1391 except ImportError:
1392 - def prevent_socket_inheritance(sock):
1393 """Dummy function, since neither fcntl nor ctypes are available.""" 1394 pass
1395 else:
1396 - def prevent_socket_inheritance(sock):
1397 """Mark the given socket fd as non-inheritable (Windows).""" 1398 if not _SetHandleInformation(sock.fileno(), 1, 0): 1399 raise WinError()
1400 else:
1401 - def prevent_socket_inheritance(sock):
1402 """Mark the given socket fd as non-inheritable (POSIX).""" 1403 fd = sock.fileno() 1404 old_flags = fcntl.fcntl(fd, fcntl.F_GETFD) 1405 fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
1406 1407
1408 -class SSLAdapter(object):
1409 1410 """Base class for SSL driver library adapters. 1411 1412 Required methods: 1413 1414 * ``wrap(sock) -> (wrapped socket, ssl environ dict)`` 1415 * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> 1416 socket file object`` 1417 """ 1418
1419 - def __init__(self, certificate, private_key, certificate_chain=None):
1420          self.certificate = certificate 
1421          self.private_key = private_key 
1422          self.certificate_chain = certificate_chain 
1423   
1424 - def wrap(self, sock):
1425          raise NotImplementedError 
1426
1427 - def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
1428          raise NotImplementedError 
1429 1430
1431 -class HTTPServer(object):
1432 1433 """An HTTP server.""" 1434 1435 _bind_addr = "127.0.0.1" 1436 _interrupt = None 1437 1438 gateway = None 1439 """A Gateway instance.""" 1440 1441 minthreads = None 1442 """The minimum number of worker threads to create (default 10).""" 1443 1444 maxthreads = None 1445 """The maximum number of worker threads to create (default -1 = no limit). 1446 """ 1447 1448 server_name = None 1449 """The name of the server; defaults to socket.gethostname().""" 1450 1451 protocol = "HTTP/1.1" 1452 """The version string to write in the Status-Line of all HTTP responses. 1453 1454 For example, "HTTP/1.1" is the default. This also limits the supported 1455 features used in the response.""" 1456 1457 request_queue_size = 5 1458 """The 'backlog' arg to socket.listen(); max queued connections 1459 (default 5). 1460 """ 1461 1462 shutdown_timeout = 5 1463 """The total time, in seconds, to wait for worker threads to cleanly exit. 1464 """ 1465 1466 timeout = 10 1467 """The timeout in seconds for accepted connections (default 10).""" 1468 1469 version = "CherryPy/3.3.0" 1470 """A version string for the HTTPServer.""" 1471 1472 software = None 1473 """The value to set for the SERVER_SOFTWARE entry in the WSGI environ. 1474 1475 If None, this defaults to ``'%s Server' % self.version``.""" 1476 1477 ready = False 1478 """An internal flag which marks whether the socket is accepting 1479 connections. 1480 """ 1481 1482 max_request_header_size = 0 1483 """The maximum size, in bytes, for request headers, or 0 for no limit.""" 1484 1485 max_request_body_size = 0 1486 """The maximum size, in bytes, for request bodies, or 0 for no limit.""" 1487 1488 nodelay = True 1489 """If True (the default since 3.1), sets the TCP_NODELAY socket option.""" 1490 1491 ConnectionClass = HTTPConnection 1492 """The class to use for handling HTTP connections.""" 1493 1494 ssl_adapter = None 1495 """An instance of SSLAdapter (or a subclass). 1496 1497 You must have the corresponding SSL driver library installed.""" 1498
1499 - def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1, 1500 server_name=None):
1501 self.bind_addr = bind_addr 1502 self.gateway = gateway 1503 1504 self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads) 1505 1506 if not server_name: 1507 server_name = socket.gethostname() 1508 self.server_name = server_name 1509 self.clear_stats()
1510
1511 - def clear_stats(self):
1512 self._start_time = None 1513 self._run_time = 0 1514 self.stats = { 1515 'Enabled': False, 1516 'Bind Address': lambda s: repr(self.bind_addr), 1517 'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(), 1518 'Accepts': 0, 1519 'Accepts/sec': lambda s: s['Accepts'] / self.runtime(), 1520 'Queue': lambda s: getattr(self.requests, "qsize", None), 1521 'Threads': lambda s: len(getattr(self.requests, "_threads", [])), 1522 'Threads Idle': lambda s: getattr(self.requests, "idle", None), 1523 'Socket Errors': 0, 1524 'Requests': lambda s: (not s['Enabled']) and -1 or sum( 1525 [w['Requests'](w) for w in s['Worker Threads'].values()], 0), 1526 'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum( 1527 [w['Bytes Read'](w) for w in s['Worker Threads'].values()], 0), 1528 'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum( 1529 [w['Bytes Written'](w) for w in s['Worker Threads'].values()], 1530 0), 1531 'Work Time': lambda s: (not s['Enabled']) and -1 or sum( 1532 [w['Work Time'](w) for w in s['Worker Threads'].values()], 0), 1533 'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum( 1534 [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6) 1535 for w in s['Worker Threads'].values()], 0), 1536 'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum( 1537 [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6) 1538 for w in s['Worker Threads'].values()], 0), 1539 'Worker Threads': {}, 1540 } 1541 logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
1542
1543 - def runtime(self):
1544 if self._start_time is None: 1545 return self._run_time 1546 else: 1547 return self._run_time + (time.time() - self._start_time)
1548
1549 - def __str__(self):
1550 return "%s.%s(%r)" % (self.__module__, self.__class__.__name__, 1551 self.bind_addr)
1552
1553 - def _get_bind_addr(self):
1554 return self._bind_addr
1555
1556 - def _set_bind_addr(self, value):
1557 if isinstance(value, tuple) and value[0] in ('', None): 1558 # Despite the socket module docs, using '' does not 1559 # allow AI_PASSIVE to work. Passing None instead 1560 # returns '0.0.0.0' like we want. In other words: 1561 # host AI_PASSIVE result 1562 # '' Y 192.168.x.y 1563 # '' N 192.168.x.y 1564 # None Y 0.0.0.0 1565 # None N 127.0.0.1 1566 # But since you can get the same effect with an explicit 1567 # '0.0.0.0', we deny both the empty string and None as values. 1568 raise ValueError("Host values of '' or None are not allowed. " 1569 "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead " 1570 "to listen on all active interfaces.") 1571 self._bind_addr = value
1572 bind_addr = property( 1573 _get_bind_addr, 1574 _set_bind_addr, 1575 doc="""The interface on which to listen for connections. 1576 1577 For TCP sockets, a (host, port) tuple. Host values may be any IPv4 1578 or IPv6 address, or any valid hostname. The string 'localhost' is a 1579 synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6). 1580 The string '0.0.0.0' is a special IPv4 entry meaning "any active 1581 interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for 1582 IPv6. The empty string or None are not allowed. 1583 1584 For UNIX sockets, supply the filename as a string.""") 1585
1586 - def start(self):
1587 """Run the server forever.""" 1588 # We don't have to trap KeyboardInterrupt or SystemExit here, 1589 # because cherrpy.server already does so, calling self.stop() for us. 1590 # If you're using this server with another framework, you should 1591 # trap those exceptions in whatever code block calls start(). 1592 self._interrupt = None 1593 1594 if self.software is None: 1595 self.software = "%s Server" % self.version 1596 1597 # Select the appropriate socket 1598 if isinstance(self.bind_addr, basestring): 1599 # AF_UNIX socket 1600 1601 # So we can reuse the socket... 1602 try: 1603 os.unlink(self.bind_addr) 1604 except: 1605 pass 1606 1607 # So everyone can access the socket... 1608 try: 1609 os.chmod(self.bind_addr, 511) # 0777 1610 except: 1611 pass 1612 1613 info = [ 1614 (socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)] 1615 else: 1616 # AF_INET or AF_INET6 socket 1617 # Get the correct address family for our host (allows IPv6 1618 # addresses) 1619 host, port = self.bind_addr 1620 try: 1621 info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, 1622 socket.SOCK_STREAM, 0, 1623 socket.AI_PASSIVE) 1624 except socket.gaierror: 1625 if ':' in self.bind_addr[0]: 1626 info = [(socket.AF_INET6, socket.SOCK_STREAM, 1627 0, "", self.bind_addr + (0, 0))] 1628 else: 1629 info = [(socket.AF_INET, socket.SOCK_STREAM, 1630 0, "", self.bind_addr)] 1631 1632 self.socket = None 1633 msg = "No socket could be created" 1634 for res in info: 1635 af, socktype, proto, canonname, sa = res 1636 try: 1637 self.bind(af, socktype, proto) 1638 except socket.error as serr: 1639 msg = "%s -- (%s: %s)" % (msg, sa, serr) 1640 if self.socket: 1641 self.socket.close() 1642 self.socket = None 1643 continue 1644 break 1645 if not self.socket: 1646 raise socket.error(msg) 1647 1648 # Timeout so KeyboardInterrupt can be caught on Win32 1649 self.socket.settimeout(1) 1650 self.socket.listen(self.request_queue_size) 1651 1652 # Create worker threads 1653 self.requests.start() 1654 1655 self.ready = True 1656 self._start_time = time.time() 1657 while self.ready: 1658 try: 1659 self.tick() 1660 except (KeyboardInterrupt, SystemExit): 1661 raise 1662 except: 1663 self.error_log("Error in HTTPServer.tick", level=logging.ERROR, 1664 traceback=True) 1665 if self.interrupt: 1666 while self.interrupt is True: 1667 # Wait for self.stop() to complete. See _set_interrupt. 1668 time.sleep(0.1) 1669 if self.interrupt: 1670 raise self.interrupt
1671
1672 - def error_log(self, msg="", level=20, traceback=False):
1673 # Override this in subclasses as desired 1674 sys.stderr.write(msg + '\n') 1675 sys.stderr.flush() 1676 if traceback: 1677 tblines = format_exc() 1678 sys.stderr.write(tblines) 1679 sys.stderr.flush()
1680
1681 - def bind(self, family, type, proto=0):
1682 """Create (or recreate) the actual socket object.""" 1683 self.socket = socket.socket(family, type, proto) 1684 prevent_socket_inheritance(self.socket) 1685 self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 1686 if self.nodelay and not isinstance(self.bind_addr, str): 1687 self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 1688 1689 if self.ssl_adapter is not None: 1690 self.socket = self.ssl_adapter.bind(self.socket) 1691 1692 # If listening on the IPV6 any address ('::' = IN6ADDR_ANY), 1693 # activate dual-stack. See 1694 # https://bitbucket.org/cherrypy/cherrypy/issue/871. 1695 if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6 1696 and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')): 1697 try: 1698 self.socket.setsockopt( 1699 socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0) 1700 except (AttributeError, socket.error): 1701 # Apparently, the socket option is not available in 1702 # this machine's TCP stack 1703 pass 1704 1705 self.socket.bind(self.bind_addr)
1706
1707 - def tick(self):
1708 """Accept a new connection and put it on the Queue.""" 1709 try: 1710 s, addr = self.socket.accept() 1711 if self.stats['Enabled']: 1712 self.stats['Accepts'] += 1 1713 if not self.ready: 1714 return 1715 1716 prevent_socket_inheritance(s) 1717 if hasattr(s, 'settimeout'): 1718 s.settimeout(self.timeout) 1719 1720 makefile = CP_makefile 1721 ssl_env = {} 1722 # if ssl cert and key are set, we try to be a secure HTTP server 1723 if self.ssl_adapter is not None: 1724 try: 1725 s, ssl_env = self.ssl_adapter.wrap(s) 1726 except NoSSLError: 1727 msg = ("The client sent a plain HTTP request, but " 1728 "this server only speaks HTTPS on this port.") 1729 buf = ["%s 400 Bad Request\r\n" % self.protocol, 1730 "Content-Length: %s\r\n" % len(msg), 1731 "Content-Type: text/plain\r\n\r\n", 1732 msg] 1733 1734 wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE) 1735 try: 1736 wfile.write("".join(buf).encode('ISO-8859-1')) 1737 except socket.error: 1738 x = sys.exc_info()[1] 1739 if x.args[0] not in socket_errors_to_ignore: 1740 raise 1741 return 1742 if not s: 1743 return 1744 makefile = self.ssl_adapter.makefile 1745 # Re-apply our timeout since we may have a new socket object 1746 if hasattr(s, 'settimeout'): 1747 s.settimeout(self.timeout) 1748 1749 conn = self.ConnectionClass(self, s, makefile) 1750 1751 if not isinstance(self.bind_addr, basestring): 1752 # optional values 1753 # Until we do DNS lookups, omit REMOTE_HOST 1754 if addr is None: # sometimes this can happen 1755 # figure out if AF_INET or AF_INET6. 1756 if len(s.getsockname()) == 2: 1757 # AF_INET 1758 addr = ('0.0.0.0', 0) 1759 else: 1760 # AF_INET6 1761 addr = ('::', 0) 1762 conn.remote_addr = addr[0] 1763 conn.remote_port = addr[1] 1764 1765 conn.ssl_env = ssl_env 1766 1767 self.requests.put(conn) 1768 except socket.timeout: 1769 # The only reason for the timeout in start() is so we can 1770 # notice keyboard interrupts on Win32, which don't interrupt 1771 # accept() by default 1772 return 1773 except socket.error: 1774 x = sys.exc_info()[1] 1775 if self.stats['Enabled']: 1776 self.stats['Socket Errors'] += 1 1777 if x.args[0] in socket_error_eintr: 1778 # I *think* this is right. EINTR should occur when a signal 1779 # is received during the accept() call; all docs say retry 1780 # the call, and I *think* I'm reading it right that Python 1781 # will then go ahead and poll for and handle the signal 1782 # elsewhere. See 1783 # https://bitbucket.org/cherrypy/cherrypy/issue/707. 1784 return 1785 if x.args[0] in socket_errors_nonblocking: 1786 # Just try again. See 1787 # https://bitbucket.org/cherrypy/cherrypy/issue/479. 1788 return 1789 if x.args[0] in socket_errors_to_ignore: 1790 # Our socket was closed. 1791 # See https://bitbucket.org/cherrypy/cherrypy/issue/686. 1792 return 1793 raise
1794
1795 - def _get_interrupt(self):
1796 return self._interrupt
1797
1798 - def _set_interrupt(self, interrupt):
1799          self._interrupt = True
1800          self.stop()
1801          self._interrupt = interrupt
1802      interrupt = property(_get_interrupt, _set_interrupt,
1803                           doc="Set this to an Exception instance to "
1804                               "interrupt the server.")
1805
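Assigning to the interrupt property is the supported way to stop the server
from another thread: the setter calls stop() and stores the exception. A sketch
(assumes ``server`` is a running CherryPyWSGIServer)::

    import threading

    def shut_down_soon(server):
        # Any Exception instance will do; the property setter calls stop().
        server.interrupt = KeyboardInterrupt('shutting down')

    threading.Timer(30.0, shut_down_soon, args=(server,)).start()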
1806 - def stop(self):
1807 """Gracefully shutdown a server that is serving forever.""" 1808 self.ready = False 1809 if self._start_time is not None: 1810 self._run_time += (time.time() - self._start_time) 1811 self._start_time = None 1812 1813 sock = getattr(self, "socket", None) 1814 if sock: 1815 if not isinstance(self.bind_addr, basestring): 1816 # Touch our own socket to make accept() return immediately. 1817 try: 1818 host, port = sock.getsockname()[:2] 1819 except socket.error: 1820 x = sys.exc_info()[1] 1821 if x.args[0] not in socket_errors_to_ignore: 1822 # Changed to use error code and not message 1823 # See 1824 # https://bitbucket.org/cherrypy/cherrypy/issue/860. 1825 raise 1826 else: 1827 # Note that we're explicitly NOT using AI_PASSIVE, 1828 # here, because we want an actual IP to touch. 1829 # localhost won't work if we've bound to a public IP, 1830 # but it will if we bound to '0.0.0.0' (INADDR_ANY). 1831 for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, 1832 socket.SOCK_STREAM): 1833 af, socktype, proto, canonname, sa = res 1834 s = None 1835 try: 1836 s = socket.socket(af, socktype, proto) 1837 # See 1838 # http://groups.google.com/group/cherrypy-users/ 1839 # browse_frm/thread/bbfe5eb39c904fe0 1840 s.settimeout(1.0) 1841 s.connect((host, port)) 1842 s.close() 1843 except socket.error: 1844 if s: 1845 s.close() 1846 if hasattr(sock, "close"): 1847 sock.close() 1848 self.socket = None 1849 1850 self.requests.stop(self.shutdown_timeout)
1851
1852
1853 -class Gateway(object):
1854 1855 """A base class to interface HTTPServer with other systems, such as WSGI. 1856 """ 1857
1858 - def __init__(self, req):
1859 self.req = req
1860
1861 - def respond(self):
1862 """Process the current request. Must be overridden in a subclass.""" 1863 raise NotImplemented
1864
1865
1866  # These may either be wsgiserver.SSLAdapter subclasses or the string names
1867  # of such classes (in which case they will be lazily loaded).
1868  ssl_adapters = {
1869      'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
1870  }
1871
1872
1873 -def get_ssl_adapter_class(name='builtin'):
1874 """Return an SSL adapter class for the given name.""" 1875 adapter = ssl_adapters[name.lower()] 1876 if isinstance(adapter, basestring): 1877 last_dot = adapter.rfind(".") 1878 attr_name = adapter[last_dot + 1:] 1879 mod_path = adapter[:last_dot] 1880 1881 try: 1882 mod = sys.modules[mod_path] 1883 if mod is None: 1884 raise KeyError() 1885 except KeyError: 1886 # The last [''] is important. 1887 mod = __import__(mod_path, globals(), locals(), ['']) 1888 1889 # Let an AttributeError propagate outward. 1890 try: 1891 adapter = getattr(mod, attr_name) 1892 except AttributeError: 1893 raise AttributeError("'%s' object has no attribute '%s'" 1894 % (mod_path, attr_name)) 1895 1896 return adapter
1897
1898  # ------------------------------- WSGI Stuff -------------------------------- #
1899
1900
1901 -class CherryPyWSGIServer(HTTPServer):
1902 1903 """A subclass of HTTPServer which calls a WSGI application.""" 1904 1905 wsgi_version = (1, 0) 1906 """The version of WSGI to produce.""" 1907
1908 - def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
1909                max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
1910          self.requests = ThreadPool(self, min=numthreads or 1, max=max)
1911          self.wsgi_app = wsgi_app
1912          self.gateway = wsgi_gateways[self.wsgi_version]
1913
1914          self.bind_addr = bind_addr
1915          if not server_name:
1916              server_name = socket.gethostname()
1917          self.server_name = server_name
1918          self.request_queue_size = request_queue_size
1919
1920          self.timeout = timeout
1921          self.shutdown_timeout = shutdown_timeout
1922          self.clear_stats()
1923
1924 - def _get_numthreads(self):
1925 return self.requests.min
1926
1927 - def _set_numthreads(self, value):
1928 self.requests.min = value
1929 numthreads = property(_get_numthreads, _set_numthreads)
1930
1931
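The numthreads property simply proxies the worker pool's minimum size, so it
can be read or adjusted on an existing instance. A brief sketch
(``my_wsgi_app`` is a placeholder)::

    server = CherryPyWSGIServer(('127.0.0.1', 8070), my_wsgi_app,
                                numthreads=10)
    server.numthreads = 30    # equivalent to server.requests.min = 30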
1932 -class WSGIGateway(Gateway):
1933 1934 """A base class to interface HTTPServer with WSGI.""" 1935
1936 - def __init__(self, req):
1937          self.req = req
1938          self.started_response = False
1939          self.env = self.get_environ()
1940          self.remaining_bytes_out = None
1941
1942 - def get_environ(self):
1943 """Return a new environ dict targeting the given wsgi.version""" 1944 raise NotImplemented
1945
1946 - def respond(self):
1947 """Process the current request.""" 1948 response = self.req.server.wsgi_app(self.env, self.start_response) 1949 try: 1950 for chunk in response: 1951 # "The start_response callable must not actually transmit 1952 # the response headers. Instead, it must store them for the 1953 # server or gateway to transmit only after the first 1954 # iteration of the application return value that yields 1955 # a NON-EMPTY string, or upon the application's first 1956 # invocation of the write() callable." (PEP 333) 1957 if chunk: 1958 if isinstance(chunk, unicodestr): 1959 chunk = chunk.encode('ISO-8859-1') 1960 self.write(chunk) 1961 finally: 1962 if hasattr(response, "close"): 1963 response.close()
1964
1965 - def start_response(self, status, headers, exc_info=None):
1966 """WSGI callable to begin the HTTP response.""" 1967 # "The application may call start_response more than once, 1968 # if and only if the exc_info argument is provided." 1969 if self.started_response and not exc_info: 1970 raise AssertionError("WSGI start_response called a second " 1971 "time with no exc_info.") 1972 self.started_response = True 1973 1974 # "if exc_info is provided, and the HTTP headers have already been 1975 # sent, start_response must raise an error, and should raise the 1976 # exc_info tuple." 1977 if self.req.sent_headers: 1978 try: 1979 raise exc_info[0](exc_info[1]).with_traceback(exc_info[2]) 1980 finally: 1981 exc_info = None 1982 1983 # According to PEP 3333, when using Python 3, the response status 1984 # and headers must be bytes masquerading as unicode; that is, they 1985 # must be of type "str" but are restricted to code points in the 1986 # "latin-1" set. 1987 if not isinstance(status, str): 1988 raise TypeError("WSGI response status is not of type str.") 1989 self.req.status = status.encode('ISO-8859-1') 1990 1991 for k, v in headers: 1992 if not isinstance(k, str): 1993 raise TypeError( 1994 "WSGI response header key %r is not of type str." % k) 1995 if not isinstance(v, str): 1996 raise TypeError( 1997 "WSGI response header value %r is not of type str." % v) 1998 if k.lower() == 'content-length': 1999 self.remaining_bytes_out = int(v) 2000 self.req.outheaders.append( 2001 (k.encode('ISO-8859-1'), v.encode('ISO-8859-1'))) 2002 2003 return self.write
2004
2005 - def write(self, chunk):
2006 """WSGI callable to write unbuffered data to the client. 2007 2008 This method is also used internally by start_response (to write 2009 data from the iterable returned by the WSGI application). 2010 """ 2011 if not self.started_response: 2012 raise AssertionError("WSGI write called before start_response.") 2013 2014 chunklen = len(chunk) 2015 rbo = self.remaining_bytes_out 2016 if rbo is not None and chunklen > rbo: 2017 if not self.req.sent_headers: 2018 # Whew. We can send a 500 to the client. 2019 self.req.simple_response("500 Internal Server Error", 2020 "The requested resource returned " 2021 "more bytes than the declared " 2022 "Content-Length.") 2023 else: 2024 # Dang. We have probably already sent data. Truncate the chunk 2025 # to fit (so the client doesn't hang) and raise an error later. 2026 chunk = chunk[:rbo] 2027 2028 if not self.req.sent_headers: 2029 self.req.sent_headers = True 2030 self.req.send_headers() 2031 2032 self.req.write(chunk) 2033 2034 if rbo is not None: 2035 rbo -= chunklen 2036 if rbo < 0: 2037 raise ValueError( 2038 "Response body exceeds the declared Content-Length.")
2039
2040
2041 -class WSGIGateway_10(WSGIGateway):
2042 2043 """A Gateway class to interface HTTPServer with WSGI 1.0.x.""" 2044
2045 - def get_environ(self):
2046 """Return a new environ dict targeting the given wsgi.version""" 2047 req = self.req 2048 env = { 2049 # set a non-standard environ entry so the WSGI app can know what 2050 # the *real* server protocol is (and what features to support). 2051 # See http://www.faqs.org/rfcs/rfc2145.html. 2052 'ACTUAL_SERVER_PROTOCOL': req.server.protocol, 2053 'PATH_INFO': req.path.decode('ISO-8859-1'), 2054 'QUERY_STRING': req.qs.decode('ISO-8859-1'), 2055 'REMOTE_ADDR': req.conn.remote_addr or '', 2056 'REMOTE_PORT': str(req.conn.remote_port or ''), 2057 'REQUEST_METHOD': req.method.decode('ISO-8859-1'), 2058 'REQUEST_URI': req.uri.decode('ISO-8859-1'), 2059 'SCRIPT_NAME': '', 2060 'SERVER_NAME': req.server.server_name, 2061 # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol. 2062 'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'), 2063 'SERVER_SOFTWARE': req.server.software, 2064 'wsgi.errors': sys.stderr, 2065 'wsgi.input': req.rfile, 2066 'wsgi.multiprocess': False, 2067 'wsgi.multithread': True, 2068 'wsgi.run_once': False, 2069 'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'), 2070 'wsgi.version': (1, 0), 2071 } 2072 if isinstance(req.server.bind_addr, basestring): 2073 # AF_UNIX. This isn't really allowed by WSGI, which doesn't 2074 # address unix domain sockets. But it's better than nothing. 2075 env["SERVER_PORT"] = "" 2076 else: 2077 env["SERVER_PORT"] = str(req.server.bind_addr[1]) 2078 2079 # Request headers 2080 for k, v in req.inheaders.items(): 2081 k = k.decode('ISO-8859-1').upper().replace("-", "_") 2082 env["HTTP_" + k] = v.decode('ISO-8859-1') 2083 2084 # CONTENT_TYPE/CONTENT_LENGTH 2085 ct = env.pop("HTTP_CONTENT_TYPE", None) 2086 if ct is not None: 2087 env["CONTENT_TYPE"] = ct 2088 cl = env.pop("HTTP_CONTENT_LENGTH", None) 2089 if cl is not None: 2090 env["CONTENT_LENGTH"] = cl 2091 2092 if req.conn.ssl_env: 2093 env.update(req.conn.ssl_env) 2094 2095 return env
2096
2097
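One use of the non-standard ACTUAL_SERVER_PROTOCOL entry set above is deciding
whether HTTP/1.1-only features are safe. A sketch; the application is
hypothetical::

    def protocol_aware_app(environ, start_response):
        real_protocol = environ.get('ACTUAL_SERVER_PROTOCOL', 'HTTP/1.0')
        can_stream = (real_protocol == 'HTTP/1.1')
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [('streaming ok: %s' % can_stream).encode('ISO-8859-1')]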
2098 -class WSGIGateway_u0(WSGIGateway_10):
2099 2100 """A Gateway class to interface HTTPServer with WSGI u.0. 2101 2102 WSGI u.0 is an experimental protocol, which uses unicode for keys 2103 and values in both Python 2 and Python 3. 2104 """ 2105
2106 - def get_environ(self):
2107 """Return a new environ dict targeting the given wsgi.version""" 2108 req = self.req 2109 env_10 = WSGIGateway_10.get_environ(self) 2110 env = env_10.copy() 2111 env['wsgi.version'] = ('u', 0) 2112 2113 # Request-URI 2114 env.setdefault('wsgi.url_encoding', 'utf-8') 2115 try: 2116 # SCRIPT_NAME is the empty string, who cares what encoding it is? 2117 env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding']) 2118 env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding']) 2119 except UnicodeDecodeError: 2120 # Fall back to latin 1 so apps can transcode if needed. 2121 env['wsgi.url_encoding'] = 'ISO-8859-1' 2122 env["PATH_INFO"] = env_10["PATH_INFO"] 2123 env["QUERY_STRING"] = env_10["QUERY_STRING"] 2124 2125 return env
2126
2127  wsgi_gateways = {
2128      (1, 0): WSGIGateway_10,
2129      ('u', 0): WSGIGateway_u0,
2130  }
2131
2132
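Because CherryPyWSGIServer.__init__ looks its gateway up in wsgi_gateways via
the class attribute wsgi_version, the experimental u.0 gateway can be selected
with a small subclass. A sketch (the subclass name and ``my_wsgi_app`` are
placeholders)::

    class UnicodeWSGIServer(CherryPyWSGIServer):
        wsgi_version = ('u', 0)

    server = UnicodeWSGIServer(('127.0.0.1', 8070), my_wsgi_app)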
2133 -class WSGIPathInfoDispatcher(object):
2134 2135 """A WSGI dispatcher for dispatch based on the PATH_INFO. 2136 2137 apps: a dict or list of (path_prefix, app) pairs. 2138 """ 2139
2140 - def __init__(self, apps):
2141          try:
2142              apps = list(apps.items())
2143          except AttributeError:
2144              pass
2145
2146          # Sort the apps by len(path), descending
2147          apps.sort()
2148          apps.reverse()
2149
2150          # The path_prefix strings must start, but not end, with a slash.
2151          # Use "" instead of "/".
2152          self.apps = [(p.rstrip("/"), a) for p, a in apps]
2153
2154 - def __call__(self, environ, start_response):
2155          path = environ["PATH_INFO"] or "/"
2156          for p, app in self.apps:
2157              # The apps list should be sorted by length, descending.
2158              if path.startswith(p + "/") or path == p:
2159                  environ = environ.copy()
2160                  environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
2161                  environ["PATH_INFO"] = path[len(p):]
2162                  return app(environ, start_response)
2163
2164          start_response('404 Not Found', [('Content-Type', 'text/plain'),
2165                                           ('Content-Length', '0')])
2166          return ['']
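A sketch of how __call__ rewrites the environ; the echo apps are placeholders
used only to expose the SCRIPT_NAME/PATH_INFO split::

    def make_echo_app(name):
        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            line = '%s: SCRIPT_NAME=%r PATH_INFO=%r' % (
                name, environ['SCRIPT_NAME'], environ['PATH_INFO'])
            return [line.encode('ISO-8859-1')]
        return app

    d = WSGIPathInfoDispatcher({'/': make_echo_app('root'),
                                '/blog': make_echo_app('blog')})
    # A request for /blog/2014/01 reaches the 'blog' app with
    # SCRIPT_NAME='/blog' and PATH_INFO='/2014/01'; /about falls through
    # to 'root' with SCRIPT_NAME='' and PATH_INFO='/about'.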
2167