Mirror of https://github.com/Tygs/0bin.git (synced 2023-08-10 21:13:00 +03:00)

Commit: Porting zerobin to python 3
@@ -3,16 +3,54 @@
|
||||
# Deprecated in CherryPy 3.2 -- remove in CherryPy 3.3
|
||||
from cherrypy.lib.reprconf import unrepr, modules, attributes
|
||||
|
||||
class file_generator(object):
|
||||
"""Yield the given input (a file object) in chunks (default 64k). (Core)"""
|
||||
def is_iterator(obj):
|
||||
'''Returns a boolean indicating if the object provided implements
|
||||
the iterator protocol (i.e. like a generator). This will return
|
||||
false for objects which are iterable, but not iterators themselves.'''
|
||||
from types import GeneratorType
|
||||
if isinstance(obj, GeneratorType):
|
||||
return True
|
||||
elif not hasattr(obj, '__iter__'):
|
||||
return False
|
||||
else:
|
||||
# Types which implement the protocol must return themselves when
|
||||
# invoking 'iter' upon them.
|
||||
return iter(obj) is obj
|
||||
|
||||
def is_closable_iterator(obj):
|
||||
|
||||
# Not an iterator.
|
||||
if not is_iterator(obj):
|
||||
return False
|
||||
|
||||
# A generator - the easiest thing to deal with.
|
||||
import inspect
|
||||
if inspect.isgenerator(obj):
|
||||
return True
|
||||
|
||||
# A custom iterator. Look for a close method...
|
||||
if not (hasattr(obj, 'close') and callable(obj.close)):
|
||||
return False
|
||||
|
||||
# ... which doesn't require any arguments.
|
||||
try:
|
||||
inspect.getcallargs(obj.close)
|
||||
except TypeError:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
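As context for the new helpers above: the `iter(obj) is obj` test is what separates true iterators from merely iterable objects. A minimal sketch (not part of the commit) of how the two cases behave::

    gen = (x for x in range(3))   # a generator is its own iterator
    lst = [1, 2, 3]               # a list is iterable but not an iterator

    assert iter(gen) is gen       # is_iterator(gen) would return True
    assert iter(lst) is not lst   # is_iterator(lst) would return False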
class file_generator(object):
|
||||
|
||||
"""Yield the given input (a file object) in chunks (default 64k). (Core)"""
|
||||
|
||||
def __init__(self, input, chunkSize=65536):
|
||||
self.input = input
|
||||
self.chunkSize = chunkSize
|
||||
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
|
||||
def __next__(self):
|
||||
chunk = self.input.read(self.chunkSize)
|
||||
if chunk:
|
||||
@@ -23,6 +61,7 @@ class file_generator(object):
|
||||
raise StopIteration()
|
||||
next = __next__
|
||||
|
||||
|
||||
def file_generator_limited(fileobj, count, chunk_size=65536):
|
||||
"""Yield the given file object in chunks, stopping after `count`
|
||||
bytes have been emitted. Default chunk size is 64kB. (Core)
|
||||
@@ -36,6 +75,7 @@ def file_generator_limited(fileobj, count, chunk_size=65536):
|
||||
remaining -= chunklen
|
||||
yield chunk
|
||||
|
||||
|
||||
def set_vary_header(response, header_name):
|
||||
"Add a Vary header to a response"
|
||||
varies = response.headers.get("Vary", "")
|
||||
|
||||
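For context, file_generator and file_generator_limited are the helpers CherryPy uses to stream file bodies in chunks. A hedged usage sketch (module path and file name assumed, not taken from the diff)::

    from cherrypy.lib import file_generator   # import path assumed

    with open('large.bin', 'rb') as f:        # hypothetical file
        total = 0
        for chunk in file_generator(f, chunkSize=65536):
            total += len(chunk)               # each chunk is at most 64 KiB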
@@ -3,25 +3,27 @@ from cherrypy.lib import httpauth
|
||||
|
||||
|
||||
def check_auth(users, encrypt=None, realm=None):
|
||||
"""If an authorization header contains credentials, return True, else False."""
|
||||
"""If an authorization header contains credentials, return True or False.
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
if 'authorization' in request.headers:
|
||||
# make sure the provided credentials are correctly set
|
||||
ah = httpauth.parseAuthorization(request.headers['authorization'])
|
||||
if ah is None:
|
||||
raise cherrypy.HTTPError(400, 'Bad Request')
|
||||
|
||||
|
||||
if not encrypt:
|
||||
encrypt = httpauth.DIGEST_AUTH_ENCODERS[httpauth.MD5]
|
||||
|
||||
|
||||
if hasattr(users, '__call__'):
|
||||
try:
|
||||
# backward compatibility
|
||||
users = users() # expect it to return a dictionary
|
||||
|
||||
users = users() # expect it to return a dictionary
|
||||
|
||||
if not isinstance(users, dict):
|
||||
raise ValueError("Authentication users must be a dictionary")
|
||||
|
||||
raise ValueError(
|
||||
"Authentication users must be a dictionary")
|
||||
|
||||
# fetch the user password
|
||||
password = users.get(ah["username"], None)
|
||||
except TypeError:
|
||||
@@ -30,58 +32,66 @@ def check_auth(users, encrypt=None, realm=None):
|
||||
else:
|
||||
if not isinstance(users, dict):
|
||||
raise ValueError("Authentication users must be a dictionary")
|
||||
|
||||
|
||||
# fetch the user password
|
||||
password = users.get(ah["username"], None)
|
||||
|
||||
|
||||
# validate the authorization by re-computing it here
|
||||
# and compare it with what the user-agent provided
|
||||
if httpauth.checkResponse(ah, password, method=request.method,
|
||||
encrypt=encrypt, realm=realm):
|
||||
request.login = ah["username"]
|
||||
return True
|
||||
|
||||
|
||||
request.login = False
|
||||
return False
|
||||
|
||||
|
||||
def basic_auth(realm, users, encrypt=None, debug=False):
|
||||
"""If auth fails, raise 401 with a basic authentication header.
|
||||
|
||||
|
||||
realm
|
||||
A string containing the authentication realm.
|
||||
|
||||
|
||||
users
|
||||
A dict of the form: {username: password} or a callable returning a dict.
|
||||
|
||||
A dict of the form: {username: password} or a callable returning
|
||||
a dict.
|
||||
|
||||
encrypt
|
||||
callable used to encrypt the password returned from the user-agent.
|
||||
if None it defaults to a md5 encryption.
|
||||
|
||||
|
||||
"""
|
||||
if check_auth(users, encrypt):
|
||||
if debug:
|
||||
cherrypy.log('Auth successful', 'TOOLS.BASIC_AUTH')
|
||||
return
|
||||
|
||||
|
||||
# inform the user-agent this path is protected
|
||||
cherrypy.serving.response.headers['www-authenticate'] = httpauth.basicAuth(realm)
|
||||
|
||||
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
||||
cherrypy.serving.response.headers[
|
||||
'www-authenticate'] = httpauth.basicAuth(realm)
|
||||
|
||||
raise cherrypy.HTTPError(
|
||||
401, "You are not authorized to access that resource")
|
||||
|
||||
|
||||
def digest_auth(realm, users, debug=False):
|
||||
"""If auth fails, raise 401 with a digest authentication header.
|
||||
|
||||
|
||||
realm
|
||||
A string containing the authentication realm.
|
||||
users
|
||||
A dict of the form: {username: password} or a callable returning a dict.
|
||||
A dict of the form: {username: password} or a callable returning
|
||||
a dict.
|
||||
"""
|
||||
if check_auth(users, realm=realm):
|
||||
if debug:
|
||||
cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
|
||||
return
|
||||
|
||||
|
||||
# inform the user-agent this path is protected
|
||||
cherrypy.serving.response.headers['www-authenticate'] = httpauth.digestAuth(realm)
|
||||
|
||||
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
||||
cherrypy.serving.response.headers[
|
||||
'www-authenticate'] = httpauth.digestAuth(realm)
|
||||
|
||||
raise cherrypy.HTTPError(
|
||||
401, "You are not authorized to access that resource")
|
||||
|
||||
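The basic_auth and digest_auth functions above are exposed as CherryPy tools and are normally driven from config rather than called directly. A rough sketch, assuming the default MD5-based encrypt and made-up credentials::

    from hashlib import md5

    # Stored values are compared against encrypt(password from the user-agent),
    # so with the default encrypt they are assumed to be MD5 hexdigests.
    users = {'jon': md5(b'secret').hexdigest()}   # hypothetical user

    config = {
        '/': {
            'tools.basic_auth.on': True,
            'tools.basic_auth.realm': 'localhost',
            'tools.basic_auth.users': users,
        }
    }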
@@ -3,7 +3,8 @@
|
||||
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
|
||||
|
||||
__doc__ = """This module provides a CherryPy 3.x tool which implements
|
||||
the server-side of HTTP Basic Access Authentication, as described in :rfc:`2617`.
|
||||
the server-side of HTTP Basic Access Authentication, as described in
|
||||
:rfc:`2617`.
|
||||
|
||||
Example usage, using the built-in checkpassword_dict function which uses a dict
|
||||
as the credentials store::
|
||||
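The example the docstring refers to is elided by this hunk; it looks roughly like the following (credentials purely illustrative)::

    from cherrypy.lib import auth_basic

    userpassdict = {'bird': 'bebop', 'ornette': 'wayout'}
    checkpassword = auth_basic.checkpassword_dict(userpassdict)
    app_config = {'/': {
        'tools.auth_basic.on': True,
        'tools.auth_basic.realm': 'earth',
        'tools.auth_basic.checkpassword': checkpassword,
    }}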
@@ -60,13 +61,13 @@ def basic_auth(realm, checkpassword, debug=False):
|
||||
username and password are the values obtained from the request's
|
||||
'authorization' header. If authentication succeeds, checkpassword
|
||||
returns True, else it returns False.
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
if '"' in realm:
|
||||
raise ValueError('Realm cannot contain the " (quote) character.')
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
auth_header = request.headers.get('authorization')
|
||||
if auth_header is not None:
|
||||
try:
|
||||
@@ -77,11 +78,13 @@ def basic_auth(realm, checkpassword, debug=False):
|
||||
if debug:
|
||||
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
|
||||
request.login = username
|
||||
return # successful authentication
|
||||
except (ValueError, binascii.Error): # split() error, base64.decodestring() error
|
||||
return # successful authentication
|
||||
# split() error, base64.decodestring() error
|
||||
except (ValueError, binascii.Error):
|
||||
raise cherrypy.HTTPError(400, 'Bad Request')
|
||||
|
||||
# Respond with 401 status and a WWW-Authenticate header
|
||||
cherrypy.serving.response.headers['www-authenticate'] = 'Basic realm="%s"' % realm
|
||||
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
||||
|
||||
# Respond with 401 status and a WWW-Authenticate header
|
||||
cherrypy.serving.response.headers[
|
||||
'www-authenticate'] = 'Basic realm="%s"' % realm
|
||||
raise cherrypy.HTTPError(
|
||||
401, "You are not authorized to access that resource")
|
||||
|
||||
@@ -41,6 +41,8 @@ def TRACE(msg):
|
||||
|
||||
# Three helper functions for users of the tool, providing three variants
|
||||
# of get_ha1() functions for three different kinds of credential stores.
|
||||
|
||||
|
||||
def get_ha1_dict_plain(user_password_dict):
|
||||
"""Returns a get_ha1 function which obtains a plaintext password from a
|
||||
dictionary of the form: {username : password}.
|
||||
@@ -57,6 +59,7 @@ def get_ha1_dict_plain(user_password_dict):
|
||||
|
||||
return get_ha1
|
||||
|
||||
|
||||
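For reference, the HA1 value that get_ha1_dict and get_ha1_file_htdigest traffic in is the RFC 2617 hash of username, realm and password; a minimal sketch::

    from hashlib import md5

    def ha1(username, realm, password):
        # HA1 = MD5(username ":" realm ":" password), per RFC 2617 section 3.2.2.2
        return md5(('%s:%s:%s' % (username, realm, password)).encode('utf-8')).hexdigest()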
def get_ha1_dict(user_ha1_dict):
|
||||
"""Returns a get_ha1 function which obtains a HA1 password hash from a
|
||||
dictionary of the form: {username : HA1}.
|
||||
@@ -67,10 +70,11 @@ def get_ha1_dict(user_ha1_dict):
|
||||
argument to digest_auth().
|
||||
"""
|
||||
def get_ha1(realm, username):
|
||||
return user_ha1_dict.get(user)
|
||||
return user_ha1_dict.get(username)
|
||||
|
||||
return get_ha1
|
||||
|
||||
|
||||
def get_ha1_file_htdigest(filename):
|
||||
"""Returns a get_ha1 function which obtains a HA1 password hash from a
|
||||
flat file with lines of the same format as that produced by the Apache
|
||||
@@ -99,18 +103,19 @@ def get_ha1_file_htdigest(filename):
|
||||
|
||||
|
||||
def synthesize_nonce(s, key, timestamp=None):
|
||||
"""Synthesize a nonce value which resists spoofing and can be checked for staleness.
|
||||
Returns a string suitable as the value for 'nonce' in the www-authenticate header.
|
||||
"""Synthesize a nonce value which resists spoofing and can be checked
|
||||
for staleness. Returns a string suitable as the value for 'nonce' in
|
||||
the www-authenticate header.
|
||||
|
||||
s
|
||||
A string related to the resource, such as the hostname of the server.
|
||||
|
||||
key
|
||||
A secret string known only to the server.
|
||||
|
||||
|
||||
timestamp
|
||||
An integer seconds-since-the-epoch timestamp
|
||||
|
||||
|
||||
"""
|
||||
if timestamp is None:
|
||||
timestamp = int(time.time())
|
||||
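The nonce produced here is, roughly, the timestamp joined to a keyed hash of it, so that validate_nonce can recompute and compare. An approximation (not the verbatim implementation)::

    from hashlib import md5

    def H(s):
        return md5(s.encode('utf-8')).hexdigest()

    def synthesize_nonce_sketch(s, key, timestamp):
        # "<timestamp>:<H(timestamp:s:key)>" -- the hash binds the timestamp to s and key
        return '%d:%s' % (timestamp, H('%d:%s:%s' % (timestamp, s, key)))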
@@ -125,6 +130,7 @@ def H(s):
|
||||
|
||||
|
||||
class HttpDigestAuthorization (object):
|
||||
|
||||
"""Class to parse a Digest Authorization header and perform re-calculation
|
||||
of the digest.
|
||||
"""
|
||||
@@ -135,7 +141,7 @@ class HttpDigestAuthorization (object):
|
||||
def __init__(self, auth_header, http_method, debug=False):
|
||||
self.http_method = http_method
|
||||
self.debug = debug
|
||||
scheme, params = auth_header.split(" ", 1)
|
||||
scheme, params = auth_header.split(" ", 1)
|
||||
self.scheme = scheme.lower()
|
||||
if self.scheme != 'digest':
|
||||
raise ValueError('Authorization scheme is not "Digest"')
|
||||
@@ -151,84 +157,95 @@ class HttpDigestAuthorization (object):
|
||||
self.nonce = paramsd.get('nonce')
|
||||
self.uri = paramsd.get('uri')
|
||||
self.method = paramsd.get('method')
|
||||
self.response = paramsd.get('response') # the response digest
|
||||
self.algorithm = paramsd.get('algorithm', 'MD5')
|
||||
self.response = paramsd.get('response') # the response digest
|
||||
self.algorithm = paramsd.get('algorithm', 'MD5').upper()
|
||||
self.cnonce = paramsd.get('cnonce')
|
||||
self.opaque = paramsd.get('opaque')
|
||||
self.qop = paramsd.get('qop') # qop
|
||||
self.nc = paramsd.get('nc') # nonce count
|
||||
self.qop = paramsd.get('qop') # qop
|
||||
self.nc = paramsd.get('nc') # nonce count
|
||||
|
||||
# perform some correctness checks
|
||||
if self.algorithm not in valid_algorithms:
|
||||
raise ValueError(self.errmsg("Unsupported value for algorithm: '%s'" % self.algorithm))
|
||||
raise ValueError(
|
||||
self.errmsg("Unsupported value for algorithm: '%s'" %
|
||||
self.algorithm))
|
||||
|
||||
has_reqd = self.username and \
|
||||
self.realm and \
|
||||
self.nonce and \
|
||||
self.uri and \
|
||||
self.response
|
||||
has_reqd = (
|
||||
self.username and
|
||||
self.realm and
|
||||
self.nonce and
|
||||
self.uri and
|
||||
self.response
|
||||
)
|
||||
if not has_reqd:
|
||||
raise ValueError(self.errmsg("Not all required parameters are present."))
|
||||
raise ValueError(
|
||||
self.errmsg("Not all required parameters are present."))
|
||||
|
||||
if self.qop:
|
||||
if self.qop not in valid_qops:
|
||||
raise ValueError(self.errmsg("Unsupported value for qop: '%s'" % self.qop))
|
||||
raise ValueError(
|
||||
self.errmsg("Unsupported value for qop: '%s'" % self.qop))
|
||||
if not (self.cnonce and self.nc):
|
||||
raise ValueError(self.errmsg("If qop is sent then cnonce and nc MUST be present"))
|
||||
raise ValueError(
|
||||
self.errmsg("If qop is sent then "
|
||||
"cnonce and nc MUST be present"))
|
||||
else:
|
||||
if self.cnonce or self.nc:
|
||||
raise ValueError(self.errmsg("If qop is not sent, neither cnonce nor nc can be present"))
|
||||
|
||||
raise ValueError(
|
||||
self.errmsg("If qop is not sent, "
|
||||
"neither cnonce nor nc can be present"))
|
||||
|
||||
def __str__(self):
|
||||
return 'authorization : %s' % self.auth_header
|
||||
|
||||
def validate_nonce(self, s, key):
|
||||
"""Validate the nonce.
|
||||
Returns True if nonce was generated by synthesize_nonce() and the timestamp
|
||||
is not spoofed, else returns False.
|
||||
Returns True if nonce was generated by synthesize_nonce() and the
|
||||
timestamp is not spoofed, else returns False.
|
||||
|
||||
s
|
||||
A string related to the resource, such as the hostname of the server.
|
||||
|
||||
A string related to the resource, such as the hostname of
|
||||
the server.
|
||||
|
||||
key
|
||||
A secret string known only to the server.
|
||||
|
||||
Both s and key must be the same values which were used to synthesize the nonce
|
||||
we are trying to validate.
|
||||
|
||||
Both s and key must be the same values which were used to synthesize
|
||||
the nonce we are trying to validate.
|
||||
"""
|
||||
try:
|
||||
timestamp, hashpart = self.nonce.split(':', 1)
|
||||
s_timestamp, s_hashpart = synthesize_nonce(s, key, timestamp).split(':', 1)
|
||||
s_timestamp, s_hashpart = synthesize_nonce(
|
||||
s, key, timestamp).split(':', 1)
|
||||
is_valid = s_hashpart == hashpart
|
||||
if self.debug:
|
||||
TRACE('validate_nonce: %s' % is_valid)
|
||||
return is_valid
|
||||
except ValueError: # split() error
|
||||
except ValueError: # split() error
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
def is_nonce_stale(self, max_age_seconds=600):
|
||||
"""Returns True if a validated nonce is stale. The nonce contains a
|
||||
timestamp in plaintext and also a secure hash of the timestamp. You should
|
||||
first validate the nonce to ensure the plaintext timestamp is not spoofed.
|
||||
timestamp in plaintext and also a secure hash of the timestamp.
|
||||
You should first validate the nonce to ensure the plaintext
|
||||
timestamp is not spoofed.
|
||||
"""
|
||||
try:
|
||||
timestamp, hashpart = self.nonce.split(':', 1)
|
||||
if int(timestamp) + max_age_seconds > int(time.time()):
|
||||
return False
|
||||
except ValueError: # int() error
|
||||
except ValueError: # int() error
|
||||
pass
|
||||
if self.debug:
|
||||
TRACE("nonce is stale")
|
||||
return True
|
||||
|
||||
|
||||
def HA2(self, entity_body=''):
|
||||
"""Returns the H(A2) string. See :rfc:`2617` section 3.2.2.3."""
|
||||
# RFC 2617 3.2.2.3
|
||||
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
|
||||
# If the "qop" directive's value is "auth" or is unspecified,
|
||||
# then A2 is:
|
||||
# A2 = method ":" digest-uri-value
|
||||
#
|
||||
# If the "qop" value is "auth-int", then A2 is:
|
||||
@@ -238,11 +255,11 @@ class HttpDigestAuthorization (object):
|
||||
elif self.qop == "auth-int":
|
||||
a2 = "%s:%s:%s" % (self.http_method, self.uri, H(entity_body))
|
||||
else:
|
||||
# in theory, this should never happen, since I validate qop in __init__()
|
||||
# in theory, this should never happen, since I validate qop in
|
||||
# __init__()
|
||||
raise ValueError(self.errmsg("Unrecognized value for qop!"))
|
||||
return H(a2)
|
||||
|
||||
|
||||
def request_digest(self, ha1, entity_body=''):
|
||||
"""Calculates the Request-Digest. See :rfc:`2617` section 3.2.2.1.
|
||||
|
||||
@@ -253,22 +270,24 @@ class HttpDigestAuthorization (object):
|
||||
If 'qop' is set to 'auth-int', then A2 includes a hash
|
||||
of the "entity body". The entity body is the part of the
|
||||
message which follows the HTTP headers. See :rfc:`2617` section
|
||||
4.3. This refers to the entity the user agent sent in the request which
|
||||
has the Authorization header. Typically GET requests don't have an entity,
|
||||
and POST requests do.
|
||||
|
||||
4.3. This refers to the entity the user agent sent in the
|
||||
request which has the Authorization header. Typically GET
|
||||
requests don't have an entity, and POST requests do.
|
||||
|
||||
"""
|
||||
ha2 = self.HA2(entity_body)
|
||||
# Request-Digest -- RFC 2617 3.2.2.1
|
||||
if self.qop:
|
||||
req = "%s:%s:%s:%s:%s" % (self.nonce, self.nc, self.cnonce, self.qop, ha2)
|
||||
req = "%s:%s:%s:%s:%s" % (
|
||||
self.nonce, self.nc, self.cnonce, self.qop, ha2)
|
||||
else:
|
||||
req = "%s:%s" % (self.nonce, ha2)
|
||||
|
||||
# RFC 2617 3.2.2.2
|
||||
#
|
||||
# If the "algorithm" directive's value is "MD5" or is unspecified, then A1 is:
|
||||
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
|
||||
# If the "algorithm" directive's value is "MD5" or is unspecified,
|
||||
# then A1 is:
|
||||
# A1 = unq(username-value) ":" unq(realm-value) ":" passwd
|
||||
#
|
||||
# If the "algorithm" directive's value is "MD5-sess", then A1 is
|
||||
# calculated only once - on the first request by the client following
|
||||
@@ -282,8 +301,8 @@ class HttpDigestAuthorization (object):
|
||||
return digest
|
||||
|
||||
|
||||
|
||||
def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stale=False):
|
||||
def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth,
|
||||
stale=False):
|
||||
"""Constructs a WWW-Authenticate header for Digest authentication."""
|
||||
if qop not in valid_qops:
|
||||
raise ValueError("Unsupported value for qop: '%s'" % qop)
|
||||
@@ -293,7 +312,7 @@ def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stal
|
||||
if nonce is None:
|
||||
nonce = synthesize_nonce(realm, key)
|
||||
s = 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
|
||||
realm, nonce, algorithm, qop)
|
||||
realm, nonce, algorithm, qop)
|
||||
if stale:
|
||||
s += ', stale="true"'
|
||||
return s
|
||||
@@ -302,16 +321,16 @@ def www_authenticate(realm, key, algorithm='MD5', nonce=None, qop=qop_auth, stal
|
||||
def digest_auth(realm, get_ha1, key, debug=False):
|
||||
"""A CherryPy tool which hooks at before_handler to perform
|
||||
HTTP Digest Access Authentication, as specified in :rfc:`2617`.
|
||||
|
||||
If the request has an 'authorization' header with a 'Digest' scheme, this
|
||||
tool authenticates the credentials supplied in that header. If
|
||||
the request has no 'authorization' header, or if it does but the scheme is
|
||||
not "Digest", or if authentication fails, the tool sends a 401 response with
|
||||
a 'WWW-Authenticate' Digest header.
|
||||
|
||||
|
||||
If the request has an 'authorization' header with a 'Digest' scheme,
|
||||
this tool authenticates the credentials supplied in that header.
|
||||
If the request has no 'authorization' header, or if it does but the
|
||||
scheme is not "Digest", or if authentication fails, the tool sends
|
||||
a 401 response with a 'WWW-Authenticate' Digest header.
|
||||
|
||||
realm
|
||||
A string containing the authentication realm.
|
||||
|
||||
|
||||
get_ha1
|
||||
A callable which looks up a username in a credentials store
|
||||
and returns the HA1 string, which is defined in the RFC to be
|
||||
@@ -320,46 +339,52 @@ def digest_auth(realm, get_ha1, key, debug=False):
|
||||
where username is obtained from the request's 'authorization' header.
|
||||
If username is not found in the credentials store, get_ha1() returns
|
||||
None.
|
||||
|
||||
|
||||
key
|
||||
A secret string known only to the server, used in the synthesis of nonces.
|
||||
|
||||
A secret string known only to the server, used in the synthesis
|
||||
of nonces.
|
||||
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
auth_header = request.headers.get('authorization')
|
||||
nonce_is_stale = False
|
||||
if auth_header is not None:
|
||||
try:
|
||||
auth = HttpDigestAuthorization(auth_header, request.method, debug=debug)
|
||||
auth = HttpDigestAuthorization(
|
||||
auth_header, request.method, debug=debug)
|
||||
except ValueError:
|
||||
raise cherrypy.HTTPError(400, "The Authorization header could not be parsed.")
|
||||
|
||||
raise cherrypy.HTTPError(
|
||||
400, "The Authorization header could not be parsed.")
|
||||
|
||||
if debug:
|
||||
TRACE(str(auth))
|
||||
|
||||
|
||||
if auth.validate_nonce(realm, key):
|
||||
ha1 = get_ha1(realm, auth.username)
|
||||
if ha1 is not None:
|
||||
# note that for request.body to be available we need to hook in at
|
||||
# before_handler, not on_start_resource like 3.1.x digest_auth does.
|
||||
# note that for request.body to be available we need to
|
||||
# hook in at before_handler, not on_start_resource like
|
||||
# 3.1.x digest_auth does.
|
||||
digest = auth.request_digest(ha1, entity_body=request.body)
|
||||
if digest == auth.response: # authenticated
|
||||
if digest == auth.response: # authenticated
|
||||
if debug:
|
||||
TRACE("digest matches auth.response")
|
||||
# Now check if nonce is stale.
|
||||
# The choice of ten minutes' lifetime for nonce is somewhat arbitrary
|
||||
# The choice of ten minutes' lifetime for nonce is somewhat
|
||||
# arbitrary
|
||||
nonce_is_stale = auth.is_nonce_stale(max_age_seconds=600)
|
||||
if not nonce_is_stale:
|
||||
request.login = auth.username
|
||||
if debug:
|
||||
TRACE("authentication of %s successful" % auth.username)
|
||||
TRACE("authentication of %s successful" %
|
||||
auth.username)
|
||||
return
|
||||
|
||||
|
||||
# Respond with 401 status and a WWW-Authenticate header
|
||||
header = www_authenticate(realm, key, stale=nonce_is_stale)
|
||||
if debug:
|
||||
TRACE(header)
|
||||
cherrypy.serving.response.headers['WWW-Authenticate'] = header
|
||||
raise cherrypy.HTTPError(401, "You are not authorized to access that resource")
|
||||
|
||||
raise cherrypy.HTTPError(
|
||||
401, "You are not authorized to access that resource")
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
"""
|
||||
CherryPy implements a simple caching system as a pluggable Tool. This tool tries
|
||||
to be an (in-process) HTTP/1.1-compliant cache. It's not quite there yet, but
|
||||
it's probably good enough for most sites.
|
||||
CherryPy implements a simple caching system as a pluggable Tool. This tool
|
||||
tries to be an (in-process) HTTP/1.1-compliant cache. It's not quite there
|
||||
yet, but it's probably good enough for most sites.
|
||||
|
||||
In general, GET responses are cached (along with selecting headers) and, if
|
||||
another request arrives for the same resource, the caching Tool will return 304
|
||||
@@ -9,8 +9,8 @@ Not Modified if possible, or serve the cached response otherwise. It also sets
|
||||
request.cached to True if serving a cached representation, and sets
|
||||
request.cacheable to False (so it doesn't get cached again).
|
||||
|
||||
If POST, PUT, or DELETE requests are made for a cached resource, they invalidate
|
||||
(delete) any cached response.
|
||||
If POST, PUT, or DELETE requests are made for a cached resource, they
|
||||
invalidate (delete) any cached response.
|
||||
|
||||
Usage
|
||||
=====
|
||||
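The usage details elided by the following hunk come down to switching the tool on in config; a hedged sketch::

    config = {
        '/': {
            'tools.caching.on': True,
            'tools.caching.delay': 3600,   # seconds before a cached copy expires
        }
    }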
@@ -39,58 +39,58 @@ import time
|
||||
|
||||
import cherrypy
|
||||
from cherrypy.lib import cptools, httputil
|
||||
from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted
|
||||
from cherrypy._cpcompat import copyitems, ntob, set_daemon, sorted, Event
|
||||
|
||||
|
||||
class Cache(object):
|
||||
|
||||
"""Base class for Cache implementations."""
|
||||
|
||||
|
||||
def get(self):
|
||||
"""Return the current variant if in the cache, else None."""
|
||||
raise NotImplemented
|
||||
|
||||
|
||||
def put(self, obj, size):
|
||||
"""Store the current variant in the cache."""
|
||||
raise NotImplemented
|
||||
|
||||
|
||||
def delete(self):
|
||||
"""Remove ALL cached variants of the current resource."""
|
||||
raise NotImplemented
|
||||
|
||||
|
||||
def clear(self):
|
||||
"""Reset the cache to its initial, empty state."""
|
||||
raise NotImplemented
|
||||
|
||||
|
||||
|
||||
# ------------------------------- Memory Cache ------------------------------- #
|
||||
|
||||
|
||||
# ------------------------------ Memory Cache ------------------------------- #
|
||||
class AntiStampedeCache(dict):
|
||||
|
||||
"""A storage system for cached items which reduces stampede collisions."""
|
||||
|
||||
|
||||
def wait(self, key, timeout=5, debug=False):
|
||||
"""Return the cached value for the given key, or None.
|
||||
|
||||
|
||||
If timeout is not None, and the value is already
|
||||
being calculated by another thread, wait until the given timeout has
|
||||
elapsed. If the value is available before the timeout expires, it is
|
||||
returned. If not, None is returned, and a sentinel placed in the cache
|
||||
to signal other threads to wait.
|
||||
|
||||
|
||||
If timeout is None, no waiting is performed nor sentinels used.
|
||||
"""
|
||||
value = self.get(key)
|
||||
if isinstance(value, threading._Event):
|
||||
if isinstance(value, Event):
|
||||
if timeout is None:
|
||||
# Ignore the other thread and recalc it ourselves.
|
||||
if debug:
|
||||
cherrypy.log('No timeout', 'TOOLS.CACHING')
|
||||
return None
|
||||
|
||||
|
||||
# Wait until it's done or times out.
|
||||
if debug:
|
||||
cherrypy.log('Waiting up to %s seconds' % timeout, 'TOOLS.CACHING')
|
||||
cherrypy.log('Waiting up to %s seconds' %
|
||||
timeout, 'TOOLS.CACHING')
|
||||
value.wait(timeout)
|
||||
if value.result is not None:
|
||||
# The other thread finished its calculation. Use it.
|
||||
@@ -104,7 +104,7 @@ class AntiStampedeCache(dict):
|
||||
e = threading.Event()
|
||||
e.result = None
|
||||
dict.__setitem__(self, key, e)
|
||||
|
||||
|
||||
return None
|
||||
elif value is None:
|
||||
# Stick an Event in the slot so other threads wait
|
||||
@@ -115,12 +115,12 @@ class AntiStampedeCache(dict):
|
||||
e.result = None
|
||||
dict.__setitem__(self, key, e)
|
||||
return value
|
||||
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Set the cached value for the given key."""
|
||||
existing = self.get(key)
|
||||
dict.__setitem__(self, key, value)
|
||||
if isinstance(existing, threading._Event):
|
||||
if isinstance(existing, Event):
|
||||
# Set Event.result so other threads waiting on it have
|
||||
# immediate access without needing to poll the cache again.
|
||||
existing.result = value
|
||||
@@ -128,49 +128,51 @@ class AntiStampedeCache(dict):
|
||||
|
||||
|
||||
class MemoryCache(Cache):
|
||||
|
||||
"""An in-memory cache for varying response content.
|
||||
|
||||
|
||||
Each key in self.store is a URI, and each value is an AntiStampedeCache.
|
||||
The response for any given URI may vary based on the values of
|
||||
"selecting request headers"; that is, those named in the Vary
|
||||
response header. We assume the list of header names to be constant
|
||||
for each URI throughout the lifetime of the application, and store
|
||||
that list in ``self.store[uri].selecting_headers``.
|
||||
|
||||
|
||||
The items contained in ``self.store[uri]`` have keys which are tuples of
|
||||
request header values (in the same order as the names in its
|
||||
selecting_headers), and values which are the actual responses.
|
||||
"""
|
||||
|
||||
|
||||
maxobjects = 1000
|
||||
"""The maximum number of cached objects; defaults to 1000."""
|
||||
|
||||
|
||||
maxobj_size = 100000
|
||||
"""The maximum size of each cached object in bytes; defaults to 100 KB."""
|
||||
|
||||
|
||||
maxsize = 10000000
|
||||
"""The maximum size of the entire cache in bytes; defaults to 10 MB."""
|
||||
|
||||
|
||||
delay = 600
|
||||
"""Seconds until the cached content expires; defaults to 600 (10 minutes)."""
|
||||
|
||||
"""Seconds until the cached content expires; defaults to 600 (10 minutes).
|
||||
"""
|
||||
|
||||
antistampede_timeout = 5
|
||||
"""Seconds to wait for other threads to release a cache lock."""
|
||||
|
||||
|
||||
expire_freq = 0.1
|
||||
"""Seconds to sleep between cache expiration sweeps."""
|
||||
|
||||
|
||||
debug = False
|
||||
|
||||
|
||||
def __init__(self):
|
||||
self.clear()
|
||||
|
||||
|
||||
# Run self.expire_cache in a separate daemon thread.
|
||||
t = threading.Thread(target=self.expire_cache, name='expire_cache')
|
||||
self.expiration_thread = t
|
||||
set_daemon(t, True)
|
||||
t.start()
|
||||
|
||||
|
||||
def clear(self):
|
||||
"""Reset the cache to its initial, empty state."""
|
||||
self.store = {}
|
||||
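To make the two-level layout described in the MemoryCache docstring concrete, the store ends up shaped like this (URI, header values and body invented for illustration)::

    # { uri: { tuple(sorted(selecting header values)): (status, headers, body, time) } }
    store = {
        'http://example.com/page?x=1': {
            ('gzip, deflate',): ('200 OK',
                                 {'Content-Type': 'text/html'},
                                 b'<html>...</html>',
                                 1345000000.0),
        },
    }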
@@ -181,10 +183,10 @@ class MemoryCache(Cache):
|
||||
self.tot_expires = 0
|
||||
self.tot_non_modified = 0
|
||||
self.cursize = 0
|
||||
|
||||
|
||||
def expire_cache(self):
|
||||
"""Continuously examine cached objects, expiring stale ones.
|
||||
|
||||
|
||||
This function is designed to be run in its own daemon thread,
|
||||
referenced at ``self.expiration_thread``.
|
||||
"""
|
||||
@@ -207,17 +209,17 @@ class MemoryCache(Cache):
|
||||
pass
|
||||
del self.expirations[expiration_time]
|
||||
time.sleep(self.expire_freq)
|
||||
|
||||
|
||||
def get(self):
|
||||
"""Return the current variant if in the cache, else None."""
|
||||
request = cherrypy.serving.request
|
||||
self.tot_gets += 1
|
||||
|
||||
|
||||
uri = cherrypy.url(qs=request.query_string)
|
||||
uricache = self.store.get(uri)
|
||||
if uricache is None:
|
||||
return None
|
||||
|
||||
|
||||
header_values = [request.headers.get(h, '')
|
||||
for h in uricache.selecting_headers]
|
||||
variant = uricache.wait(key=tuple(sorted(header_values)),
|
||||
@@ -226,12 +228,12 @@ class MemoryCache(Cache):
|
||||
if variant is not None:
|
||||
self.tot_hist += 1
|
||||
return variant
|
||||
|
||||
|
||||
def put(self, variant, size):
|
||||
"""Store the current variant in the cache."""
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
uri = cherrypy.url(qs=request.query_string)
|
||||
uricache = self.store.get(uri)
|
||||
if uricache is None:
|
||||
@@ -239,24 +241,24 @@ class MemoryCache(Cache):
|
||||
uricache.selecting_headers = [
|
||||
e.value for e in response.headers.elements('Vary')]
|
||||
self.store[uri] = uricache
|
||||
|
||||
|
||||
if len(self.store) < self.maxobjects:
|
||||
total_size = self.cursize + size
|
||||
|
||||
|
||||
# checks if there's space for the object
|
||||
if (size < self.maxobj_size and total_size < self.maxsize):
|
||||
# add to the expirations list
|
||||
expiration_time = response.time + self.delay
|
||||
bucket = self.expirations.setdefault(expiration_time, [])
|
||||
bucket.append((size, uri, uricache.selecting_headers))
|
||||
|
||||
|
||||
# add to the cache
|
||||
header_values = [request.headers.get(h, '')
|
||||
for h in uricache.selecting_headers]
|
||||
uricache[tuple(sorted(header_values))] = variant
|
||||
self.tot_puts += 1
|
||||
self.cursize = total_size
|
||||
|
||||
|
||||
def delete(self):
|
||||
"""Remove ALL cached variants of the current resource."""
|
||||
uri = cherrypy.url(qs=cherrypy.serving.request.query_string)
|
||||
@@ -265,12 +267,12 @@ class MemoryCache(Cache):
|
||||
|
||||
def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
"""Try to obtain cached output. If fresh enough, raise HTTPError(304).
|
||||
|
||||
|
||||
If POST, PUT, or DELETE:
|
||||
* invalidates (deletes) any cached response for this resource
|
||||
* sets request.cached = False
|
||||
* sets request.cacheable = False
|
||||
|
||||
|
||||
else if a cached copy exists:
|
||||
* sets request.cached = True
|
||||
* sets request.cacheable = False
|
||||
@@ -280,7 +282,7 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
if necessary.
|
||||
* sets response.status and response.body to the cached values
|
||||
* returns True
|
||||
|
||||
|
||||
otherwise:
|
||||
* sets request.cached = False
|
||||
* sets request.cacheable = True
|
||||
@@ -288,16 +290,16 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
if not hasattr(cherrypy, "_cache"):
|
||||
# Make a process-wide Cache object.
|
||||
cherrypy._cache = kwargs.pop("cache_class", MemoryCache)()
|
||||
|
||||
|
||||
# Take all remaining kwargs and set them on the Cache object.
|
||||
for k, v in kwargs.items():
|
||||
setattr(cherrypy._cache, k, v)
|
||||
cherrypy._cache.debug = debug
|
||||
|
||||
|
||||
# POST, PUT, DELETE should invalidate (delete) the cached copy.
|
||||
# See http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.10.
|
||||
if request.method in invalid_methods:
|
||||
@@ -308,12 +310,12 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
request.cached = False
|
||||
request.cacheable = False
|
||||
return False
|
||||
|
||||
|
||||
if 'no-cache' in [e.value for e in request.headers.elements('Pragma')]:
|
||||
request.cached = False
|
||||
request.cacheable = True
|
||||
return False
|
||||
|
||||
|
||||
cache_data = cherrypy._cache.get()
|
||||
request.cached = bool(cache_data)
|
||||
request.cacheable = not request.cached
|
||||
@@ -325,17 +327,19 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
directive = atoms.pop(0)
|
||||
if directive == 'max-age':
|
||||
if len(atoms) != 1 or not atoms[0].isdigit():
|
||||
raise cherrypy.HTTPError(400, "Invalid Cache-Control header")
|
||||
raise cherrypy.HTTPError(
|
||||
400, "Invalid Cache-Control header")
|
||||
max_age = int(atoms[0])
|
||||
break
|
||||
elif directive == 'no-cache':
|
||||
if debug:
|
||||
cherrypy.log('Ignoring cache due to Cache-Control: no-cache',
|
||||
'TOOLS.CACHING')
|
||||
cherrypy.log(
|
||||
'Ignoring cache due to Cache-Control: no-cache',
|
||||
'TOOLS.CACHING')
|
||||
request.cached = False
|
||||
request.cacheable = True
|
||||
return False
|
||||
|
||||
|
||||
if debug:
|
||||
cherrypy.log('Reading response from cache', 'TOOLS.CACHING')
|
||||
s, h, b, create_time = cache_data
|
||||
@@ -347,15 +351,16 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
request.cached = False
|
||||
request.cacheable = True
|
||||
return False
|
||||
|
||||
# Copy the response headers. See http://www.cherrypy.org/ticket/721.
|
||||
|
||||
# Copy the response headers. See
|
||||
# https://bitbucket.org/cherrypy/cherrypy/issue/721.
|
||||
response.headers = rh = httputil.HeaderMap()
|
||||
for k in h:
|
||||
dict.__setitem__(rh, k, dict.__getitem__(h, k))
|
||||
|
||||
|
||||
# Add the required Age header
|
||||
response.headers["Age"] = str(age)
|
||||
|
||||
|
||||
try:
|
||||
# Note that validate_since depends on a Last-Modified header;
|
||||
# this was put into the cached copy, and should have been
|
||||
@@ -366,7 +371,7 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
if x.status == 304:
|
||||
cherrypy._cache.tot_non_modified += 1
|
||||
raise
|
||||
|
||||
|
||||
# serve it & get out from the request
|
||||
response.status = s
|
||||
response.body = b
|
||||
@@ -379,29 +384,29 @@ def get(invalid_methods=("POST", "PUT", "DELETE"), debug=False, **kwargs):
|
||||
def tee_output():
|
||||
"""Tee response output to cache storage. Internal."""
|
||||
# Used by CachingTool by attaching to request.hooks
|
||||
|
||||
|
||||
request = cherrypy.serving.request
|
||||
if 'no-store' in request.headers.values('Cache-Control'):
|
||||
return
|
||||
|
||||
|
||||
def tee(body):
|
||||
"""Tee response.body into a list."""
|
||||
if ('no-cache' in response.headers.values('Pragma') or
|
||||
'no-store' in response.headers.values('Cache-Control')):
|
||||
'no-store' in response.headers.values('Cache-Control')):
|
||||
for chunk in body:
|
||||
yield chunk
|
||||
return
|
||||
|
||||
|
||||
output = []
|
||||
for chunk in body:
|
||||
output.append(chunk)
|
||||
yield chunk
|
||||
|
||||
|
||||
# save the cache data
|
||||
body = ntob('').join(output)
|
||||
cherrypy._cache.put((response.status, response.headers or {},
|
||||
body, response.time), len(body))
|
||||
|
||||
|
||||
response = cherrypy.serving.response
|
||||
response.body = tee(response.body)
|
||||
|
||||
@@ -415,25 +420,25 @@ def expires(secs=0, force=False, debug=False):
|
||||
expire. The 'Expires' header will be set to response.time + secs.
|
||||
If secs is zero, the 'Expires' header is set one year in the past, and
|
||||
the following "cache prevention" headers are also set:
|
||||
|
||||
|
||||
* Pragma: no-cache
|
||||
* Cache-Control': no-cache, must-revalidate
|
||||
|
||||
force
|
||||
If False, the following headers are checked:
|
||||
|
||||
|
||||
* Etag
|
||||
* Last-Modified
|
||||
* Age
|
||||
* Expires
|
||||
|
||||
|
||||
If any are already present, none of the above response headers are set.
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
response = cherrypy.serving.response
|
||||
headers = response.headers
|
||||
|
||||
|
||||
cacheable = False
|
||||
if not force:
|
||||
# some header names that indicate that the response can be cached
|
||||
@@ -441,7 +446,7 @@ def expires(secs=0, force=False, debug=False):
|
||||
if indicator in headers:
|
||||
cacheable = True
|
||||
break
|
||||
|
||||
|
||||
if not cacheable and not force:
|
||||
if debug:
|
||||
cherrypy.log('request is not cacheable', 'TOOLS.EXPIRES')
|
||||
@@ -450,7 +455,7 @@ def expires(secs=0, force=False, debug=False):
|
||||
cherrypy.log('request is cacheable', 'TOOLS.EXPIRES')
|
||||
if isinstance(secs, datetime.timedelta):
|
||||
secs = (86400 * secs.days) + secs.seconds
|
||||
|
||||
|
||||
if secs == 0:
|
||||
if force or ("Pragma" not in headers):
|
||||
headers["Pragma"] = "no-cache"
|
||||
|
||||
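Like the caching tool, expires() is normally enabled from config; a rough sketch with an invented path and lifetime::

    config = {
        '/static': {
            'tools.expires.on': True,
            'tools.expires.secs': 86400,   # one day; 0 would emit the cache-prevention headers
        }
    }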
@@ -1,7 +1,7 @@
|
||||
"""Code-coverage tools for CherryPy.
|
||||
|
||||
To use this module, or the coverage tools in the test suite,
|
||||
you need to download 'coverage.py', either Gareth Rees' `original
|
||||
you need to download 'coverage.py', either Gareth Rees' `original
|
||||
implementation <http://www.garethrees.org/2001/12/04/python-coverage/>`_
|
||||
or Ned Batchelder's `enhanced version:
|
||||
<http://www.nedbatchelder.com/code/modules/coverage.html>`_
|
||||
@@ -24,23 +24,27 @@ import re
|
||||
import sys
|
||||
import cgi
|
||||
from cherrypy._cpcompat import quote_plus
|
||||
import os, os.path
|
||||
import os
|
||||
import os.path
|
||||
localFile = os.path.join(os.path.dirname(__file__), "coverage.cache")
|
||||
|
||||
the_coverage = None
|
||||
try:
|
||||
from coverage import coverage
|
||||
the_coverage = coverage(data_file=localFile)
|
||||
|
||||
def start():
|
||||
the_coverage.start()
|
||||
except ImportError:
|
||||
# Setting the_coverage to None will raise errors
|
||||
# that need to be trapped downstream.
|
||||
the_coverage = None
|
||||
|
||||
|
||||
import warnings
|
||||
warnings.warn("No code coverage will be performed; coverage.py could not be imported.")
|
||||
|
||||
warnings.warn(
|
||||
"No code coverage will be performed; "
|
||||
"coverage.py could not be imported.")
|
||||
|
||||
def start():
|
||||
pass
|
||||
start.priority = 20
|
||||
@@ -69,7 +73,7 @@ TEMPLATE_MENU = """<html>
|
||||
font-size: small;
|
||||
font-weight: bold;
|
||||
font-style: italic;
|
||||
margin-top: 5px;
|
||||
margin-top: 5px;
|
||||
}
|
||||
input { border: 1px solid #ccc; padding: 2px; }
|
||||
.directory {
|
||||
@@ -118,15 +122,18 @@ TEMPLATE_FORM = """
|
||||
<div id="options">
|
||||
<form action='menu' method=GET>
|
||||
<input type='hidden' name='base' value='%(base)s' />
|
||||
Show percentages <input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
|
||||
Hide files over <input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
|
||||
Show percentages
|
||||
<input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
|
||||
Hide files over
|
||||
<input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
|
||||
Exclude files matching<br />
|
||||
<input type='text' id='exclude' name='exclude' value='%(exclude)s' size='20' />
|
||||
<input type='text' id='exclude' name='exclude'
|
||||
value='%(exclude)s' size='20' />
|
||||
<br />
|
||||
|
||||
<input type='submit' value='Change view' id="submit"/>
|
||||
</form>
|
||||
</div>"""
|
||||
</div>"""
|
||||
|
||||
TEMPLATE_FRAMESET = """<html>
|
||||
<head><title>CherryPy coverage data</title></head>
|
||||
@@ -173,7 +180,10 @@ TEMPLATE_LOC_EXCLUDED = """<tr class="excluded">
|
||||
<td>%s</td>
|
||||
</tr>\n"""
|
||||
|
||||
TEMPLATE_ITEM = "%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
|
||||
TEMPLATE_ITEM = (
|
||||
"%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
|
||||
)
|
||||
|
||||
|
||||
def _percent(statements, missing):
|
||||
s = len(statements)
|
||||
@@ -182,24 +192,31 @@ def _percent(statements, missing):
|
||||
return int(round(100.0 * e / s))
|
||||
return 0
|
||||
|
||||
|
||||
def _show_branch(root, base, path, pct=0, showpct=False, exclude="",
|
||||
coverage=the_coverage):
|
||||
|
||||
|
||||
# Show the directory name and any of our children
|
||||
dirs = [k for k, v in root.items() if v]
|
||||
dirs.sort()
|
||||
for name in dirs:
|
||||
newpath = os.path.join(path, name)
|
||||
|
||||
|
||||
if newpath.lower().startswith(base):
|
||||
relpath = newpath[len(base):]
|
||||
yield "| " * relpath.count(os.sep)
|
||||
yield "<a class='directory' href='menu?base=%s&exclude=%s'>%s</a>\n" % \
|
||||
(newpath, quote_plus(exclude), name)
|
||||
|
||||
for chunk in _show_branch(root[name], base, newpath, pct, showpct, exclude, coverage=coverage):
|
||||
yield (
|
||||
"<a class='directory' "
|
||||
"href='menu?base=%s&exclude=%s'>%s</a>\n" %
|
||||
(newpath, quote_plus(exclude), name)
|
||||
)
|
||||
|
||||
for chunk in _show_branch(
|
||||
root[name], base, newpath, pct, showpct,
|
||||
exclude, coverage=coverage
|
||||
):
|
||||
yield chunk
|
||||
|
||||
|
||||
# Now list the files
|
||||
if path.lower().startswith(base):
|
||||
relpath = path[len(base):]
|
||||
@@ -207,7 +224,7 @@ def _show_branch(root, base, path, pct=0, showpct=False, exclude="",
|
||||
files.sort()
|
||||
for name in files:
|
||||
newpath = os.path.join(path, name)
|
||||
|
||||
|
||||
pc_str = ""
|
||||
if showpct:
|
||||
try:
|
||||
@@ -217,22 +234,24 @@ def _show_branch(root, base, path, pct=0, showpct=False, exclude="",
|
||||
pass
|
||||
else:
|
||||
pc = _percent(statements, missing)
|
||||
pc_str = ("%3d%% " % pc).replace(' ',' ')
|
||||
pc_str = ("%3d%% " % pc).replace(' ', ' ')
|
||||
if pc < float(pct) or pc == -1:
|
||||
pc_str = "<span class='fail'>%s</span>" % pc_str
|
||||
else:
|
||||
pc_str = "<span class='pass'>%s</span>" % pc_str
|
||||
|
||||
|
||||
yield TEMPLATE_ITEM % ("| " * (relpath.count(os.sep) + 1),
|
||||
pc_str, newpath, name)
|
||||
|
||||
|
||||
def _skip_file(path, exclude):
|
||||
if exclude:
|
||||
return bool(re.search(exclude, path))
|
||||
|
||||
|
||||
def _graft(path, tree):
|
||||
d = tree
|
||||
|
||||
|
||||
p = path
|
||||
atoms = []
|
||||
while True:
|
||||
@@ -243,12 +262,13 @@ def _graft(path, tree):
|
||||
atoms.append(p)
|
||||
if p != "/":
|
||||
atoms.append("/")
|
||||
|
||||
|
||||
atoms.reverse()
|
||||
for node in atoms:
|
||||
if node:
|
||||
d = d.setdefault(node, {})
|
||||
|
||||
|
||||
def get_tree(base, exclude, coverage=the_coverage):
|
||||
"""Return covered module names as a nested dict."""
|
||||
tree = {}
|
||||
@@ -258,8 +278,9 @@ def get_tree(base, exclude, coverage=the_coverage):
|
||||
_graft(path, tree)
|
||||
return tree
|
||||
|
||||
|
||||
class CoverStats(object):
|
||||
|
||||
|
||||
def __init__(self, coverage, root=None):
|
||||
self.coverage = coverage
|
||||
if root is None:
|
||||
@@ -268,20 +289,20 @@ class CoverStats(object):
|
||||
import cherrypy
|
||||
root = os.path.dirname(cherrypy.__file__)
|
||||
self.root = root
|
||||
|
||||
|
||||
def index(self):
|
||||
return TEMPLATE_FRAMESET % self.root.lower()
|
||||
index.exposed = True
|
||||
|
||||
|
||||
def menu(self, base="/", pct="50", showpct="",
|
||||
exclude=r'python\d\.\d|test|tut\d|tutorial'):
|
||||
|
||||
|
||||
# The coverage module uses all-lower-case names.
|
||||
base = base.lower().rstrip(os.sep)
|
||||
|
||||
|
||||
yield TEMPLATE_MENU
|
||||
yield TEMPLATE_FORM % locals()
|
||||
|
||||
|
||||
# Start by showing links for parent paths
|
||||
yield "<div id='crumbs'>"
|
||||
path = ""
|
||||
@@ -292,22 +313,23 @@ class CoverStats(object):
|
||||
yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s"
|
||||
% (path, quote_plus(exclude), atom, os.sep))
|
||||
yield "</div>"
|
||||
|
||||
|
||||
yield "<div id='tree'>"
|
||||
|
||||
|
||||
# Then display the tree
|
||||
tree = get_tree(base, exclude, self.coverage)
|
||||
if not tree:
|
||||
yield "<p>No modules covered.</p>"
|
||||
else:
|
||||
for chunk in _show_branch(tree, base, "/", pct,
|
||||
showpct=='checked', exclude, coverage=self.coverage):
|
||||
showpct == 'checked', exclude,
|
||||
coverage=self.coverage):
|
||||
yield chunk
|
||||
|
||||
|
||||
yield "</div>"
|
||||
yield "</body></html>"
|
||||
menu.exposed = True
|
||||
|
||||
|
||||
def annotated_file(self, filename, statements, excluded, missing):
|
||||
source = open(filename, 'r')
|
||||
buffer = []
|
||||
@@ -329,9 +351,10 @@ class CoverStats(object):
|
||||
yield template % (lno, cgi.escape(pastline))
|
||||
buffer = []
|
||||
yield template % (lineno, cgi.escape(line))
|
||||
|
||||
|
||||
def report(self, name):
|
||||
filename, statements, excluded, missing, _ = self.coverage.analysis2(name)
|
||||
filename, statements, excluded, missing, _ = self.coverage.analysis2(
|
||||
name)
|
||||
pc = _percent(statements, missing)
|
||||
yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
|
||||
fullpath=name,
|
||||
@@ -350,9 +373,9 @@ def serve(path=localFile, port=8080, root=None):
|
||||
if coverage is None:
|
||||
raise ImportError("The coverage module could not be imported.")
|
||||
from coverage import coverage
|
||||
cov = coverage(data_file = path)
|
||||
cov = coverage(data_file=path)
|
||||
cov.load()
|
||||
|
||||
|
||||
import cherrypy
|
||||
cherrypy.config.update({'server.socket_port': int(port),
|
||||
'server.thread_pool': 10,
|
||||
@@ -362,4 +385,3 @@ def serve(path=localFile, port=8080, root=None):
|
||||
|
||||
if __name__ == "__main__":
|
||||
serve(*tuple(sys.argv[1:]))
|
||||
|
||||
|
||||
@@ -21,33 +21,37 @@ to collect stats to import a third-party module. Therefore, we choose to
|
||||
re-use the `logging` module by adding a `statistics` object to it.
|
||||
|
||||
That `logging.statistics` object is a nested dict. It is not a custom class,
|
||||
because that would 1) require libraries and applications to import a third-
|
||||
party module in order to participate, 2) inhibit innovation in extrapolation
|
||||
approaches and in reporting tools, and 3) be slow. There are, however, some
|
||||
specifications regarding the structure of the dict.
|
||||
because that would:
|
||||
|
||||
{
|
||||
+----"SQLAlchemy": {
|
||||
| "Inserts": 4389745,
|
||||
| "Inserts per Second":
|
||||
| lambda s: s["Inserts"] / (time() - s["Start"]),
|
||||
| C +---"Table Statistics": {
|
||||
| o | "widgets": {-----------+
|
||||
N | l | "Rows": 1.3M, | Record
|
||||
a | l | "Inserts": 400, |
|
||||
m | e | },---------------------+
|
||||
e | c | "froobles": {
|
||||
s | t | "Rows": 7845,
|
||||
p | i | "Inserts": 0,
|
||||
a | o | },
|
||||
c | n +---},
|
||||
e | "Slow Queries":
|
||||
| [{"Query": "SELECT * FROM widgets;",
|
||||
| "Processing Time": 47.840923343,
|
||||
| },
|
||||
| ],
|
||||
+----},
|
||||
}
|
||||
1. require libraries and applications to import a third-party module in
|
||||
order to participate
|
||||
2. inhibit innovation in extrapolation approaches and in reporting tools, and
|
||||
3. be slow.
|
||||
|
||||
There are, however, some specifications regarding the structure of the dict.::
|
||||
|
||||
{
|
||||
+----"SQLAlchemy": {
|
||||
| "Inserts": 4389745,
|
||||
| "Inserts per Second":
|
||||
| lambda s: s["Inserts"] / (time() - s["Start"]),
|
||||
| C +---"Table Statistics": {
|
||||
| o | "widgets": {-----------+
|
||||
N | l | "Rows": 1.3M, | Record
|
||||
a | l | "Inserts": 400, |
|
||||
m | e | },---------------------+
|
||||
e | c | "froobles": {
|
||||
s | t | "Rows": 7845,
|
||||
p | i | "Inserts": 0,
|
||||
a | o | },
|
||||
c | n +---},
|
||||
e | "Slow Queries":
|
||||
| [{"Query": "SELECT * FROM widgets;",
|
||||
| "Processing Time": 47.840923343,
|
||||
| },
|
||||
| ],
|
||||
+----},
|
||||
}
|
||||
|
||||
The `logging.statistics` dict has four levels. The topmost level is nothing
|
||||
more than a set of names to introduce modularity, usually along the lines of
|
||||
@@ -65,13 +69,13 @@ Each namespace, then, is a dict of named statistical values, such as
|
||||
good on a report: spaces and capitalization are just fine.
|
||||
|
||||
In addition to scalars, values in a namespace MAY be a (third-layer)
|
||||
dict, or a list, called a "collection". For example, the CherryPy StatsTool
|
||||
keeps track of what each request is doing (or has most recently done)
|
||||
in a 'Requests' collection, where each key is a thread ID; each
|
||||
dict, or a list, called a "collection". For example, the CherryPy
|
||||
:class:`StatsTool` keeps track of what each request is doing (or has most
|
||||
recently done) in a 'Requests' collection, where each key is a thread ID; each
|
||||
value in the subdict MUST be a fourth dict (whew!) of statistical data about
|
||||
each thread. We call each subdict in the collection a "record". Similarly,
|
||||
the StatsTool also keeps a list of slow queries, where each record contains
|
||||
data about each slow query, in order.
|
||||
the :class:`StatsTool` also keeps a list of slow queries, where each record
|
||||
contains data about each slow query, in order.
|
||||
|
||||
Values in a namespace or record may also be functions, which brings us to:
|
||||
|
||||
@@ -86,17 +90,17 @@ scalar values you already have on hand.
|
||||
|
||||
When it comes time to report on the gathered data, however, we usually have
|
||||
much more freedom in what we can calculate. Therefore, whenever reporting
|
||||
tools (like the provided StatsPage CherryPy class) fetch the contents of
|
||||
`logging.statistics` for reporting, they first call `extrapolate_statistics`
|
||||
(passing the whole `statistics` dict as the only argument). This makes a
|
||||
deep copy of the statistics dict so that the reporting tool can both iterate
|
||||
over it and even change it without harming the original. But it also expands
|
||||
any functions in the dict by calling them. For example, you might have a
|
||||
'Current Time' entry in the namespace with the value "lambda scope: time.time()".
|
||||
The "scope" parameter is the current namespace dict (or record, if we're
|
||||
currently expanding one of those instead), allowing you access to existing
|
||||
static entries. If you're truly evil, you can even modify more than one entry
|
||||
at a time.
|
||||
tools (like the provided :class:`StatsPage` CherryPy class) fetch the contents
|
||||
of `logging.statistics` for reporting, they first call
|
||||
`extrapolate_statistics` (passing the whole `statistics` dict as the only
|
||||
argument). This makes a deep copy of the statistics dict so that the
|
||||
reporting tool can both iterate over it and even change it without harming
|
||||
the original. But it also expands any functions in the dict by calling them.
|
||||
For example, you might have a 'Current Time' entry in the namespace with the
|
||||
value "lambda scope: time.time()". The "scope" parameter is the current
|
||||
namespace dict (or record, if we're currently expanding one of those
|
||||
instead), allowing you access to existing static entries. If you're truly
|
||||
evil, you can even modify more than one entry at a time.
|
||||
|
||||
However, don't try to calculate an entry and then use its value in further
|
||||
extrapolations; the order in which the functions are called is not guaranteed.
|
||||
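A compact, hedged illustration of the extrapolation step described above (namespace name and entries invented)::

    import logging
    import time

    from cherrypy.lib import cpstats

    if not hasattr(logging, 'statistics'):
        logging.statistics = {}

    mystats = logging.statistics.setdefault('My App', {})
    mystats['Start Time'] = time.time()
    mystats['Uptime'] = lambda s: time.time() - s['Start Time']

    snapshot = cpstats.extrapolate_statistics(logging.statistics)
    # snapshot['My App']['Uptime'] is now a plain float; the lambda was called with
    # the namespace dict itself as its 'scope' argument.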
@@ -108,19 +112,20 @@ After the whole thing has been extrapolated, it's time for:
|
||||
Reporting
|
||||
---------
|
||||
|
||||
The StatsPage class grabs the `logging.statistics` dict, extrapolates it all,
|
||||
and then transforms it to HTML for easy viewing. Each namespace gets its own
|
||||
header and attribute table, plus an extra table for each collection. This is
|
||||
NOT part of the statistics specification; other tools can format how they like.
|
||||
The :class:`StatsPage` class grabs the `logging.statistics` dict, extrapolates
|
||||
it all, and then transforms it to HTML for easy viewing. Each namespace gets
|
||||
its own header and attribute table, plus an extra table for each collection.
|
||||
This is NOT part of the statistics specification; other tools can format how
|
||||
they like.
|
||||
|
||||
You can control which columns are output and how they are formatted by updating
|
||||
StatsPage.formatting, which is a dict that mirrors the keys and nesting of
|
||||
`logging.statistics`. The difference is that, instead of data values, it has
|
||||
formatting values. Use None for a given key to indicate to the StatsPage that a
|
||||
given column should not be output. Use a string with formatting (such as '%.3f')
|
||||
to interpolate the value(s), or use a callable (such as lambda v: v.isoformat())
|
||||
for more advanced formatting. Any entry which is not mentioned in the formatting
|
||||
dict is output unchanged.
|
||||
given column should not be output. Use a string with formatting
|
||||
(such as '%.3f') to interpolate the value(s), or use a callable (such as
|
||||
lambda v: v.isoformat()) for more advanced formatting. Any entry which is not
|
||||
mentioned in the formatting dict is output unchanged.
|
||||
|
||||
Monitoring
|
||||
----------
|
||||
@@ -145,12 +150,12 @@ entries to False or True, if present.
|
||||
Usage
|
||||
=====
|
||||
|
||||
To collect statistics on CherryPy applications:
|
||||
To collect statistics on CherryPy applications::
|
||||
|
||||
from cherrypy.lib import cpstats
|
||||
appconfig['/']['tools.cpstats.on'] = True
|
||||
|
||||
To collect statistics on your own code:
|
||||
To collect statistics on your own code::
|
||||
|
||||
import logging
|
||||
# Initialize the repository
|
||||
@@ -172,20 +177,22 @@ To collect statistics on your own code:
|
||||
if mystats.get('Enabled', False):
|
||||
mystats['Important Events'] += 1
|
||||
|
||||
To report statistics:
|
||||
To report statistics::
|
||||
|
||||
root.cpstats = cpstats.StatsPage()
|
||||
|
||||
To format statistics reports:
|
||||
To format statistics reports::
|
||||
|
||||
See 'Reporting', above.
|
||||
|
||||
"""
|
||||
|
||||
# -------------------------------- Statistics -------------------------------- #
|
||||
# ------------------------------- Statistics -------------------------------- #
|
||||
|
||||
import logging
|
||||
if not hasattr(logging, 'statistics'): logging.statistics = {}
|
||||
if not hasattr(logging, 'statistics'):
|
||||
logging.statistics = {}
|
||||
|
||||
|
||||
def extrapolate_statistics(scope):
|
||||
"""Return an extrapolated copy of the given scope."""
|
||||
@@ -201,7 +208,7 @@ def extrapolate_statistics(scope):
|
||||
return c
|
||||
|
||||
|
||||
# --------------------- CherryPy Applications Statistics --------------------- #
|
||||
# -------------------- CherryPy Applications Statistics --------------------- #
|
||||
|
||||
import threading
|
||||
import time
|
||||
@@ -211,12 +218,20 @@ import cherrypy
|
||||
appstats = logging.statistics.setdefault('CherryPy Applications', {})
|
||||
appstats.update({
|
||||
'Enabled': True,
|
||||
'Bytes Read/Request': lambda s: (s['Total Requests'] and
|
||||
(s['Total Bytes Read'] / float(s['Total Requests'])) or 0.0),
|
||||
'Bytes Read/Request': lambda s: (
|
||||
s['Total Requests'] and
|
||||
(s['Total Bytes Read'] / float(s['Total Requests'])) or
|
||||
0.0
|
||||
),
|
||||
'Bytes Read/Second': lambda s: s['Total Bytes Read'] / s['Uptime'](s),
|
||||
'Bytes Written/Request': lambda s: (s['Total Requests'] and
|
||||
(s['Total Bytes Written'] / float(s['Total Requests'])) or 0.0),
|
||||
'Bytes Written/Second': lambda s: s['Total Bytes Written'] / s['Uptime'](s),
|
||||
'Bytes Written/Request': lambda s: (
|
||||
s['Total Requests'] and
|
||||
(s['Total Bytes Written'] / float(s['Total Requests'])) or
|
||||
0.0
|
||||
),
|
||||
'Bytes Written/Second': lambda s: (
|
||||
s['Total Bytes Written'] / s['Uptime'](s)
|
||||
),
|
||||
'Current Time': lambda s: time.time(),
|
||||
'Current Requests': 0,
|
||||
'Requests/Second': lambda s: float(s['Total Requests']) / s['Uptime'](s),
|
||||
@@ -228,28 +243,29 @@ appstats.update({
|
||||
'Total Time': 0,
|
||||
'Uptime': lambda s: time.time() - s['Start Time'],
|
||||
'Requests': {},
|
||||
})
|
||||
})
|
||||
|
||||
proc_time = lambda s: time.time() - s['Start Time']
|
||||
|
||||
|
||||
class ByteCountWrapper(object):
|
||||
|
||||
"""Wraps a file-like object, counting the number of bytes read."""
|
||||
|
||||
|
||||
def __init__(self, rfile):
|
||||
self.rfile = rfile
|
||||
self.bytes_read = 0
|
||||
|
||||
|
||||
def read(self, size=-1):
|
||||
data = self.rfile.read(size)
|
||||
self.bytes_read += len(data)
|
||||
return data
|
||||
|
||||
|
||||
def readline(self, size=-1):
|
||||
data = self.rfile.readline(size)
|
||||
self.bytes_read += len(data)
|
||||
return data
|
||||
|
||||
|
||||
def readlines(self, sizehint=0):
|
||||
# Shamelessly stolen from StringIO
|
||||
total = 0
|
||||
@@ -262,13 +278,13 @@ class ByteCountWrapper(object):
|
||||
break
|
||||
line = self.readline()
|
||||
return lines
|
||||
|
||||
|
||||
def close(self):
|
||||
self.rfile.close()
|
||||
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
|
||||
def next(self):
|
||||
data = self.rfile.next()
|
||||
self.bytes_read += len(data)
|
||||
@@ -279,30 +295,31 @@ average_uriset_time = lambda s: s['Count'] and (s['Sum'] / s['Count']) or 0
|
||||
|
||||
|
||||
class StatsTool(cherrypy.Tool):
|
||||
|
||||
"""Record various information about the current request."""
|
||||
|
||||
|
||||
def __init__(self):
|
||||
cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
|
||||
|
||||
|
||||
def _setup(self):
|
||||
"""Hook this tool into cherrypy.request.
|
||||
|
||||
|
||||
The standard CherryPy request object will automatically call this
|
||||
method when the tool is "turned on" in config.
|
||||
"""
|
||||
if appstats.get('Enabled', False):
|
||||
cherrypy.Tool._setup(self)
|
||||
self.record_start()
|
||||
|
||||
|
||||
def record_start(self):
|
||||
"""Record the beginning of a request."""
|
||||
request = cherrypy.serving.request
|
||||
if not hasattr(request.rfile, 'bytes_read'):
|
||||
request.rfile = ByteCountWrapper(request.rfile)
|
||||
request.body.fp = request.rfile
|
||||
|
||||
|
||||
r = request.remote
|
||||
|
||||
|
||||
appstats['Current Requests'] += 1
|
||||
appstats['Total Requests'] += 1
|
||||
appstats['Requests'][threading._get_ident()] = {
|
||||
@@ -315,37 +332,39 @@ class StatsTool(cherrypy.Tool):
|
||||
'Request-Line': request.request_line,
|
||||
'Response Status': None,
|
||||
'Start Time': time.time(),
|
||||
}
|
||||
}
|
||||
|
||||
def record_stop(self, uriset=None, slow_queries=1.0, slow_queries_count=100,
|
||||
debug=False, **kwargs):
|
||||
def record_stop(
|
||||
self, uriset=None, slow_queries=1.0, slow_queries_count=100,
|
||||
debug=False, **kwargs):
|
||||
"""Record the end of a request."""
|
||||
resp = cherrypy.serving.response
|
||||
w = appstats['Requests'][threading._get_ident()]
|
||||
|
||||
|
||||
r = cherrypy.request.rfile.bytes_read
|
||||
w['Bytes Read'] = r
|
||||
appstats['Total Bytes Read'] += r
|
||||
|
||||
|
||||
if resp.stream:
|
||||
w['Bytes Written'] = 'chunked'
|
||||
else:
|
||||
cl = int(resp.headers.get('Content-Length', 0))
|
||||
w['Bytes Written'] = cl
|
||||
appstats['Total Bytes Written'] += cl
|
||||
|
||||
w['Response Status'] = getattr(resp, 'output_status', None) or resp.status
|
||||
|
||||
|
||||
w['Response Status'] = getattr(
|
||||
resp, 'output_status', None) or resp.status
|
||||
|
||||
w['End Time'] = time.time()
|
||||
p = w['End Time'] - w['Start Time']
|
||||
w['Processing Time'] = p
|
||||
appstats['Total Time'] += p
|
||||
|
||||
|
||||
appstats['Current Requests'] -= 1
|
||||
|
||||
|
||||
if debug:
|
||||
cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
|
||||
|
||||
|
||||
if uriset:
|
||||
rs = appstats.setdefault('URI Set Tracking', {})
|
||||
r = rs.setdefault(uriset, {
|
||||
@@ -357,7 +376,7 @@ class StatsTool(cherrypy.Tool):
|
||||
r['Max'] = p
|
||||
r['Count'] += 1
|
||||
r['Sum'] += p
|
||||
|
||||
|
||||
if slow_queries and p > slow_queries:
|
||||
sq = appstats.setdefault('Slow Queries', [])
|
||||
sq.append(w.copy())
|
||||
@@ -388,6 +407,7 @@ missing = object()
|
||||
locale_date = lambda v: time.strftime('%c', time.gmtime(v))
|
||||
iso_format = lambda v: time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
|
||||
|
||||
|
||||
def pause_resume(ns):
|
||||
def _pause_resume(enabled):
|
||||
pause_disabled = ''
|
||||
@@ -410,7 +430,7 @@ def pause_resume(ns):
|
||||
|
||||
|
||||
class StatsPage(object):
|
||||
|
||||
|
||||
formatting = {
|
||||
'CherryPy Applications': {
|
||||
'Enabled': pause_resume('CherryPy Applications'),
|
||||
@@ -427,20 +447,20 @@ class StatsPage(object):
|
||||
'End Time': None,
|
||||
'Processing Time': '%.3f',
|
||||
'Start Time': iso_format,
|
||||
},
|
||||
},
|
||||
'URI Set Tracking': {
|
||||
'Avg': '%.3f',
|
||||
'Max': '%.3f',
|
||||
'Min': '%.3f',
|
||||
'Sum': '%.3f',
|
||||
},
|
||||
},
|
||||
'Requests': {
|
||||
'Bytes Read': '%s',
|
||||
'Bytes Written': '%s',
|
||||
'End Time': None,
|
||||
'Processing Time': '%.3f',
|
||||
'Start Time': None,
|
||||
},
|
||||
},
|
||||
},
|
||||
'CherryPy WSGIServer': {
|
||||
'Enabled': pause_resume('CherryPy WSGIServer'),
|
||||
@@ -448,8 +468,7 @@ class StatsPage(object):
|
||||
'Start time': iso_format,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
|
||||
def index(self):
|
||||
# Transform the raw data into pretty output for HTML
|
||||
yield """
|
||||
@@ -500,18 +519,25 @@ table.stats2 th {
|
||||
""" % title
|
||||
for i, (key, value) in enumerate(scalars):
|
||||
colnum = i % 3
|
||||
if colnum == 0: yield """
|
||||
if colnum == 0:
|
||||
yield """
|
||||
<tr>"""
|
||||
yield (
|
||||
"""
|
||||
<th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" %
|
||||
vars()
|
||||
)
|
||||
if colnum == 2:
|
||||
yield """
|
||||
</tr>"""
|
||||
|
||||
if colnum == 0:
|
||||
yield """
|
||||
<th>%(key)s</th><td id='%(title)s-%(key)s'>%(value)s</td>""" % vars()
|
||||
if colnum == 2: yield """
|
||||
</tr>"""
|
||||
|
||||
if colnum == 0: yield """
|
||||
<th></th><td></td>
|
||||
<th></th><td></td>
|
||||
</tr>"""
|
||||
elif colnum == 1: yield """
|
||||
elif colnum == 1:
|
||||
yield """
|
||||
<th></th><td></td>
|
||||
</tr>"""
|
||||
yield """
|
||||
@@ -547,7 +573,7 @@ table.stats2 th {
|
||||
</html>
|
||||
"""
|
||||
index.exposed = True
|
||||
|
||||
|
||||
def get_namespaces(self):
|
||||
"""Yield (title, scalars, collections) for each namespace."""
|
||||
s = extrapolate_statistics(logging.statistics)
|
||||
@@ -574,7 +600,7 @@ table.stats2 th {
|
||||
v = format % v
|
||||
scalars.append((k, v))
|
||||
yield title, scalars, collections
|
||||
|
||||
|
||||
def get_dict_collection(self, v, formatting):
|
||||
"""Return ([headers], [rows]) for the given collection."""
|
||||
# E.g., the 'Requests' dict.
|
||||
@@ -588,7 +614,7 @@ table.stats2 th {
|
||||
if k3 not in headers:
|
||||
headers.append(k3)
|
||||
headers.sort()
|
||||
|
||||
|
||||
subrows = []
|
||||
for k2, record in sorted(v.items()):
|
||||
subrow = [k2]
|
||||
@@ -604,9 +630,9 @@ table.stats2 th {
|
||||
v3 = format % v3
|
||||
subrow.append(v3)
|
||||
subrows.append(subrow)
|
||||
|
||||
|
||||
return headers, subrows
|
||||
|
||||
|
||||
def get_list_collection(self, v, formatting):
|
||||
"""Return ([headers], [subrows]) for the given collection."""
|
||||
# E.g., the 'Slow Queries' list.
|
||||
@@ -620,7 +646,7 @@ table.stats2 th {
|
||||
if k3 not in headers:
|
||||
headers.append(k3)
|
||||
headers.sort()
|
||||
|
||||
|
||||
subrows = []
|
||||
for record in v:
|
||||
subrow = []
|
||||
@@ -636,27 +662,26 @@ table.stats2 th {
|
||||
v3 = format % v3
|
||||
subrow.append(v3)
|
||||
subrows.append(subrow)
|
||||
|
||||
|
||||
return headers, subrows
|
||||
|
||||
|
||||
if json is not None:
|
||||
def data(self):
|
||||
s = extrapolate_statistics(logging.statistics)
|
||||
cherrypy.response.headers['Content-Type'] = 'application/json'
|
||||
return json.dumps(s, sort_keys=True, indent=4)
|
||||
data.exposed = True
|
||||
|
||||
|
||||
def pause(self, namespace):
|
||||
logging.statistics.get(namespace, {})['Enabled'] = False
|
||||
raise cherrypy.HTTPRedirect('./')
|
||||
pause.exposed = True
|
||||
pause.cp_config = {'tools.allow.on': True,
|
||||
'tools.allow.methods': ['POST']}
|
||||
|
||||
|
||||
def resume(self, namespace):
|
||||
logging.statistics.get(namespace, {})['Enabled'] = True
|
||||
raise cherrypy.HTTPRedirect('./')
|
||||
resume.exposed = True
|
||||
resume.cp_config = {'tools.allow.on': True,
|
||||
'tools.allow.methods': ['POST']}
|
||||
|
||||
|
||||
@@ -4,20 +4,21 @@ import logging
|
||||
import re
|
||||
|
||||
import cherrypy
|
||||
from cherrypy._cpcompat import basestring, ntob, md5, set
|
||||
from cherrypy._cpcompat import basestring, md5, set, unicodestr
|
||||
from cherrypy.lib import httputil as _httputil
|
||||
from cherrypy.lib import is_iterator
|
||||
|
||||
|
||||
# Conditional HTTP request support #
|
||||
|
||||
def validate_etags(autotags=False, debug=False):
|
||||
"""Validate the current ETag against If-Match, If-None-Match headers.
|
||||
|
||||
|
||||
If autotags is True, an ETag response-header value will be provided
|
||||
from an MD5 hash of the response body (unless some other code has
|
||||
already provided an ETag header). If False (the default), the ETag
|
||||
will not be automatic.
|
||||
|
||||
|
||||
WARNING: the autotags feature is not designed for URL's which allow
|
||||
methods other than GET. For example, if a POST to the same URL returns
|
||||
no content, the automatic ETag will be incorrect, breaking a fundamental
|
||||
@@ -27,15 +28,15 @@ def validate_etags(autotags=False, debug=False):
|
||||
See :rfc:`2616` Section 14.24.
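
A config sketch for enabling this tool (assuming the standard
'tools.etags.*' config keys)::

    config = {'/': {'tools.etags.on': True,
                    'tools.etags.autotags': False}}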
|
||||
"""
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
# Guard against being run twice.
|
||||
if hasattr(response, "ETag"):
|
||||
return
|
||||
|
||||
|
||||
status, reason, msg = _httputil.valid_status(response.status)
|
||||
|
||||
|
||||
etag = response.headers.get('ETag')
|
||||
|
||||
|
||||
# Automatic ETag generation. See warning in docstring.
|
||||
if etag:
|
||||
if debug:
|
||||
@@ -52,9 +53,9 @@ def validate_etags(autotags=False, debug=False):
|
||||
if debug:
|
||||
cherrypy.log('Setting ETag: %s' % etag, 'TOOLS.ETAGS')
|
||||
response.headers['ETag'] = etag
|
||||
|
||||
|
||||
response.ETag = etag
|
||||
|
||||
|
||||
# "If the request would, without the If-Match header field, result in
|
||||
# anything other than a 2xx or 412 status, then the If-Match header
|
||||
# MUST be ignored."
|
||||
@@ -62,7 +63,7 @@ def validate_etags(autotags=False, debug=False):
|
||||
cherrypy.log('Status: %s' % status, 'TOOLS.ETAGS')
|
||||
if status >= 200 and status <= 299:
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
conditions = request.headers.elements('If-Match') or []
|
||||
conditions = [str(x) for x in conditions]
|
||||
if debug:
|
||||
@@ -71,7 +72,7 @@ def validate_etags(autotags=False, debug=False):
|
||||
if conditions and not (conditions == ["*"] or etag in conditions):
|
||||
raise cherrypy.HTTPError(412, "If-Match failed: ETag %r did "
|
||||
"not match %r" % (etag, conditions))
|
||||
|
||||
|
||||
conditions = request.headers.elements('If-None-Match') or []
|
||||
conditions = [str(x) for x in conditions]
|
||||
if debug:
|
||||
@@ -79,16 +80,18 @@ def validate_etags(autotags=False, debug=False):
|
||||
'TOOLS.ETAGS')
|
||||
if conditions == ["*"] or etag in conditions:
|
||||
if debug:
|
||||
cherrypy.log('request.method: %s' % request.method, 'TOOLS.ETAGS')
|
||||
cherrypy.log('request.method: %s' %
|
||||
request.method, 'TOOLS.ETAGS')
|
||||
if request.method in ("GET", "HEAD"):
|
||||
raise cherrypy.HTTPRedirect([], 304)
|
||||
else:
|
||||
raise cherrypy.HTTPError(412, "If-None-Match failed: ETag %r "
|
||||
"matched %r" % (etag, conditions))
|
||||
|
||||
|
||||
def validate_since():
|
||||
"""Validate the current Last-Modified against If-Modified-Since headers.
|
||||
|
||||
|
||||
If no code has set the Last-Modified response header, then no validation
|
||||
will be performed.
|
||||
"""
|
||||
@@ -96,14 +99,14 @@ def validate_since():
|
||||
lastmod = response.headers.get('Last-Modified')
|
||||
if lastmod:
|
||||
status, reason, msg = _httputil.valid_status(response.status)
|
||||
|
||||
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
since = request.headers.get('If-Unmodified-Since')
|
||||
if since and since != lastmod:
|
||||
if (status >= 200 and status <= 299) or status == 412:
|
||||
raise cherrypy.HTTPError(412)
|
||||
|
||||
|
||||
since = request.headers.get('If-Modified-Since')
|
||||
if since and since == lastmod:
|
||||
if (status >= 200 and status <= 299) or status == 304:
|
||||
@@ -117,11 +120,11 @@ def validate_since():
|
||||
|
||||
def allow(methods=None, debug=False):
|
||||
"""Raise 405 if request.method not in methods (default ['GET', 'HEAD']).
|
||||
|
||||
|
||||
The given methods are case-insensitive, and may be in any order.
|
||||
If only one method is allowed, you may supply a single string;
|
||||
if more than one, supply a list of strings.
|
||||
|
||||
|
||||
Regardless of whether the current method is allowed or not, this
|
||||
also emits an 'Allow' response header, containing the given methods.
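
For example, a config sketch limiting a handler to GET and POST (HEAD is
appended automatically whenever GET is allowed)::

    config = {'/submit': {'tools.allow.on': True,
                          'tools.allow.methods': ['GET', 'POST']}}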
|
||||
"""
|
||||
@@ -132,7 +135,7 @@ def allow(methods=None, debug=False):
|
||||
methods = ['GET', 'HEAD']
|
||||
elif 'GET' in methods and 'HEAD' not in methods:
|
||||
methods.append('HEAD')
|
||||
|
||||
|
||||
cherrypy.response.headers['Allow'] = ', '.join(methods)
|
||||
if cherrypy.request.method not in methods:
|
||||
if debug:
|
||||
@@ -148,27 +151,27 @@ def allow(methods=None, debug=False):
|
||||
def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
|
||||
scheme='X-Forwarded-Proto', debug=False):
|
||||
"""Change the base URL (scheme://host[:port][/path]).
|
||||
|
||||
|
||||
For running a CP server behind Apache, lighttpd, or other HTTP server.
|
||||
|
||||
|
||||
For Apache and lighttpd, you should leave the 'local' argument at the
|
||||
default value of 'X-Forwarded-Host'. For Squid, you probably want to set
|
||||
tools.proxy.local = 'Origin'.
|
||||
|
||||
|
||||
If you want the new request.base to include path info (not just the host),
|
||||
you must explicitly set base to the full base path, and ALSO set 'local'
|
||||
to '', so that the X-Forwarded-Host request header (which never includes
|
||||
path info) does not override it. Regardless, the value for 'base' MUST
|
||||
NOT end in a slash.
|
||||
|
||||
|
||||
cherrypy.request.remote.ip (the IP address of the client) will be
|
||||
rewritten if the header specified by the 'remote' arg is valid.
|
||||
By default, 'remote' is set to 'X-Forwarded-For'. If you do not
|
||||
want to rewrite remote.ip, set the 'remote' arg to an empty string.
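
A typical config sketch when CherryPy sits behind nginx or Apache (header
names as documented above; adjust them to whatever your front end sends)::

    config = {'/': {'tools.proxy.on': True,
                    'tools.proxy.local': 'X-Forwarded-Host',
                    'tools.proxy.scheme': 'X-Forwarded-Proto'}}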
|
||||
"""
|
||||
|
||||
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
if scheme:
|
||||
s = request.headers.get(scheme, None)
|
||||
if debug:
|
||||
@@ -181,7 +184,7 @@ def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
|
||||
scheme = s
|
||||
if not scheme:
|
||||
scheme = request.base[:request.base.find("://")]
|
||||
|
||||
|
||||
if local:
|
||||
lbase = request.headers.get(local, None)
|
||||
if debug:
|
||||
@@ -194,27 +197,27 @@ def proxy(base=None, local='X-Forwarded-Host', remote='X-Forwarded-For',
|
||||
base = '127.0.0.1'
|
||||
else:
|
||||
base = '127.0.0.1:%s' % port
|
||||
|
||||
|
||||
if base.find("://") == -1:
|
||||
# add http:// or https:// if needed
|
||||
base = scheme + "://" + base
|
||||
|
||||
|
||||
request.base = base
|
||||
|
||||
|
||||
if remote:
|
||||
xff = request.headers.get(remote)
|
||||
if debug:
|
||||
cherrypy.log('Testing remote %r:%r' % (remote, xff), 'TOOLS.PROXY')
|
||||
if xff:
|
||||
if remote == 'X-Forwarded-For':
|
||||
# See http://bob.pythonmac.org/archives/2005/09/23/apache-x-forwarded-for-caveat/
|
||||
xff = xff.split(',')[-1].strip()
|
||||
#Bug #1268
|
||||
xff = xff.split(',')[0].strip()
|
||||
request.remote.ip = xff
|
||||
|
||||
|
||||
def ignore_headers(headers=('Range',), debug=False):
|
||||
"""Delete request headers whose field names are included in 'headers'.
|
||||
|
||||
|
||||
This is a useful tool for working behind certain HTTP servers;
|
||||
for example, Apache duplicates the work that CP does for 'Range'
|
||||
headers, and will doubly-truncate the response.
|
||||
@@ -241,10 +244,10 @@ response_headers.failsafe = True
|
||||
def referer(pattern, accept=True, accept_missing=False, error=403,
|
||||
message='Forbidden Referer header.', debug=False):
|
||||
"""Raise HTTPError if Referer header does/does not match the given pattern.
|
||||
|
||||
|
||||
pattern
|
||||
A regular expression pattern to test against the Referer.
|
||||
|
||||
|
||||
accept
|
||||
If True, the Referer must match the pattern; if False,
|
||||
the Referer must NOT match the pattern.
|
||||
@@ -254,10 +257,10 @@ def referer(pattern, accept=True, accept_missing=False, error=403,
|
||||
|
||||
error
|
||||
The HTTP error code to return to the client on failure.
|
||||
|
||||
|
||||
message
|
||||
A string to include in the response body on failure.
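
A config sketch that only accepts requests referred from one site::

    config = {'/vote': {'tools.referer.on': True,
                        'tools.referer.pattern': r'http://example\.com/',
                        'tools.referer.accept': True}}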
|
||||
|
||||
|
||||
"""
|
||||
try:
|
||||
ref = cherrypy.serving.request.headers['Referer']
|
||||
@@ -272,44 +275,48 @@ def referer(pattern, accept=True, accept_missing=False, error=403,
|
||||
cherrypy.log('No Referer header', 'TOOLS.REFERER')
|
||||
if accept_missing:
|
||||
return
|
||||
|
||||
|
||||
raise cherrypy.HTTPError(error, message)
|
||||
|
||||
|
||||
class SessionAuth(object):
|
||||
|
||||
"""Assert that the user is logged in."""
|
||||
|
||||
|
||||
session_key = "username"
|
||||
debug = False
|
||||
|
||||
|
||||
def check_username_and_password(self, username, password):
|
||||
pass
|
||||
|
||||
|
||||
def anonymous(self):
|
||||
"""Provide a temporary user name for anonymous users."""
|
||||
pass
|
||||
|
||||
|
||||
def on_login(self, username):
|
||||
pass
|
||||
|
||||
|
||||
def on_logout(self, username):
|
||||
pass
|
||||
|
||||
|
||||
def on_check(self, username):
|
||||
pass
|
||||
|
||||
def login_screen(self, from_page='..', username='', error_msg='', **kwargs):
|
||||
return ntob("""<html><body>
|
||||
|
||||
def login_screen(self, from_page='..', username='', error_msg='',
|
||||
**kwargs):
|
||||
return (unicodestr("""<html><body>
|
||||
Message: %(error_msg)s
|
||||
<form method="post" action="do_login">
|
||||
Login: <input type="text" name="username" value="%(username)s" size="10" /><br />
|
||||
Password: <input type="password" name="password" size="10" /><br />
|
||||
<input type="hidden" name="from_page" value="%(from_page)s" /><br />
|
||||
Login: <input type="text" name="username" value="%(username)s" size="10" />
|
||||
<br />
|
||||
Password: <input type="password" name="password" size="10" />
|
||||
<br />
|
||||
<input type="hidden" name="from_page" value="%(from_page)s" />
|
||||
<br />
|
||||
<input type="submit" />
|
||||
</form>
|
||||
</body></html>""" % {'from_page': from_page, 'username': username,
|
||||
'error_msg': error_msg}, "utf-8")
|
||||
|
||||
</body></html>""") % vars()).encode("utf-8")
|
||||
|
||||
def do_login(self, username, password, from_page='..', **kwargs):
|
||||
"""Login. May raise redirect, or return True if request handled."""
|
||||
response = cherrypy.serving.response
|
||||
@@ -326,7 +333,7 @@ Message: %(error_msg)s
|
||||
cherrypy.session[self.session_key] = username
|
||||
self.on_login(username)
|
||||
raise cherrypy.HTTPRedirect(from_page or "/")
|
||||
|
||||
|
||||
def do_logout(self, from_page='..', **kwargs):
|
||||
"""Logout. May raise redirect, or return True if request handled."""
|
||||
sess = cherrypy.session
|
||||
@@ -336,61 +343,62 @@ Message: %(error_msg)s
|
||||
cherrypy.serving.request.login = None
|
||||
self.on_logout(username)
|
||||
raise cherrypy.HTTPRedirect(from_page)
|
||||
|
||||
|
||||
def do_check(self):
|
||||
"""Assert username. May raise redirect, or return True if request handled."""
|
||||
"""Assert username. Raise redirect, or return True if request handled.
|
||||
"""
|
||||
sess = cherrypy.session
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
username = sess.get(self.session_key)
|
||||
if not username:
|
||||
sess[self.session_key] = username = self.anonymous()
|
||||
if self.debug:
|
||||
cherrypy.log('No session[username], trying anonymous', 'TOOLS.SESSAUTH')
|
||||
self._debug_message('No session[username], trying anonymous')
|
||||
if not username:
|
||||
url = cherrypy.url(qs=request.query_string)
|
||||
if self.debug:
|
||||
cherrypy.log('No username, routing to login_screen with '
|
||||
'from_page %r' % url, 'TOOLS.SESSAUTH')
|
||||
self._debug_message(
|
||||
'No username, routing to login_screen with from_page %(url)r',
|
||||
locals(),
|
||||
)
|
||||
response.body = self.login_screen(url)
|
||||
if "Content-Length" in response.headers:
|
||||
# Delete Content-Length header so finalize() recalcs it.
|
||||
del response.headers["Content-Length"]
|
||||
return True
|
||||
if self.debug:
|
||||
cherrypy.log('Setting request.login to %r' % username, 'TOOLS.SESSAUTH')
|
||||
self._debug_message('Setting request.login to %(username)r', locals())
|
||||
request.login = username
|
||||
self.on_check(username)
|
||||
|
||||
|
||||
def _debug_message(self, template, context={}):
|
||||
if not self.debug:
|
||||
return
|
||||
cherrypy.log(template % context, 'TOOLS.SESSAUTH')
|
||||
|
||||
def run(self):
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
path = request.path_info
|
||||
if path.endswith('login_screen'):
|
||||
if self.debug:
|
||||
cherrypy.log('routing %r to login_screen' % path, 'TOOLS.SESSAUTH')
|
||||
return self.login_screen(**request.params)
|
||||
self._debug_message('routing %(path)r to login_screen', locals())
|
||||
response.body = self.login_screen()
|
||||
return True
|
||||
elif path.endswith('do_login'):
|
||||
if request.method != 'POST':
|
||||
response.headers['Allow'] = "POST"
|
||||
if self.debug:
|
||||
cherrypy.log('do_login requires POST', 'TOOLS.SESSAUTH')
|
||||
self._debug_message('do_login requires POST')
|
||||
raise cherrypy.HTTPError(405)
|
||||
if self.debug:
|
||||
cherrypy.log('routing %r to do_login' % path, 'TOOLS.SESSAUTH')
|
||||
self._debug_message('routing %(path)r to do_login', locals())
|
||||
return self.do_login(**request.params)
|
||||
elif path.endswith('do_logout'):
|
||||
if request.method != 'POST':
|
||||
response.headers['Allow'] = "POST"
|
||||
raise cherrypy.HTTPError(405)
|
||||
if self.debug:
|
||||
cherrypy.log('routing %r to do_logout' % path, 'TOOLS.SESSAUTH')
|
||||
self._debug_message('routing %(path)r to do_logout', locals())
|
||||
return self.do_logout(**request.params)
|
||||
else:
|
||||
if self.debug:
|
||||
cherrypy.log('No special path, running do_check', 'TOOLS.SESSAUTH')
|
||||
self._debug_message('No special path, running do_check')
|
||||
return self.do_check()
|
||||
|
||||
|
||||
@@ -412,15 +420,17 @@ def log_traceback(severity=logging.ERROR, debug=False):
|
||||
"""Write the last error's traceback to the cherrypy error log."""
|
||||
cherrypy.log("", "HTTP", severity=severity, traceback=True)
|
||||
|
||||
|
||||
def log_request_headers(debug=False):
|
||||
"""Write request headers to the cherrypy error log."""
|
||||
h = [" %s: %s" % (k, v) for k, v in cherrypy.serving.request.header_list]
|
||||
cherrypy.log('\nRequest Headers:\n' + '\n'.join(h), "HTTP")
|
||||
|
||||
|
||||
def log_hooks(debug=False):
|
||||
"""Write request.hooks to the cherrypy error log."""
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
msg = []
|
||||
# Sort by the standard points if possible.
|
||||
from cherrypy import _cprequest
|
||||
@@ -428,7 +438,7 @@ def log_hooks(debug=False):
|
||||
for k in request.hooks.keys():
|
||||
if k not in points:
|
||||
points.append(k)
|
||||
|
||||
|
||||
for k in points:
|
||||
msg.append(" %s:" % k)
|
||||
v = request.hooks.get(k, [])
|
||||
@@ -438,6 +448,7 @@ def log_hooks(debug=False):
|
||||
cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
|
||||
':\n' + '\n'.join(msg), "HTTP")
|
||||
|
||||
|
||||
def redirect(url='', internal=True, debug=False):
|
||||
"""Raise InternalRedirect or HTTPRedirect to the given url."""
|
||||
if debug:
|
||||
@@ -449,11 +460,12 @@ def redirect(url='', internal=True, debug=False):
|
||||
else:
|
||||
raise cherrypy.HTTPRedirect(url)
|
||||
|
||||
|
||||
def trailing_slash(missing=True, extra=False, status=None, debug=False):
|
||||
"""Redirect if path_info has (missing|extra) trailing slash."""
|
||||
request = cherrypy.serving.request
|
||||
pi = request.path_info
|
||||
|
||||
|
||||
if debug:
|
||||
cherrypy.log('is_index: %r, missing: %r, extra: %r, path_info: %r' %
|
||||
(request.is_index, missing, extra, pi),
|
||||
@@ -470,17 +482,17 @@ def trailing_slash(missing=True, extra=False, status=None, debug=False):
|
||||
new_url = cherrypy.url(pi[:-1], request.query_string)
|
||||
raise cherrypy.HTTPRedirect(new_url, status=status or 301)
|
||||
|
||||
|
||||
def flatten(debug=False):
|
||||
"""Wrap response.body in a generator that recursively iterates over body.
|
||||
|
||||
|
||||
This allows cherrypy.response.body to consist of 'nested generators';
|
||||
that is, a set of generators that yield generators.
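
For example, a handler written as the sketch below is flattened into a
single stream of chunks::

    def index():
        yield "<ul>"
        yield ("<li>%s</li>" % word for word in ("one", "two"))
        yield "</ul>"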
|
||||
"""
|
||||
import types
|
||||
def flattener(input):
|
||||
numchunks = 0
|
||||
for x in input:
|
||||
if not isinstance(x, types.GeneratorType):
|
||||
if not is_iterator(x):
|
||||
numchunks += 1
|
||||
yield x
|
||||
else:
|
||||
@@ -495,9 +507,9 @@ def flatten(debug=False):
|
||||
|
||||
def accept(media=None, debug=False):
|
||||
"""Return the client's preferred media-type (from the given Content-Types).
|
||||
|
||||
|
||||
If 'media' is None (the default), no test will be performed.
|
||||
|
||||
|
||||
If 'media' is provided, it should be the Content-Type value (as a string)
|
||||
or values (as a list or tuple of strings) which the current resource
|
||||
can emit. The client's acceptable media ranges (as declared in the
|
||||
@@ -505,16 +517,16 @@ def accept(media=None, debug=False):
|
||||
values; the first such string is returned. That is, the return value
|
||||
will always be one of the strings provided in the 'media' arg (or None
|
||||
if 'media' is None).
|
||||
|
||||
|
||||
If no match is found, then HTTPError 406 (Not Acceptable) is raised.
|
||||
Note that most web browsers send */* as a (low-quality) acceptable
|
||||
media range, which should match any Content-Type. In addition, "...if
|
||||
no Accept header field is present, then it is assumed that the client
|
||||
accepts all media types."
|
||||
|
||||
|
||||
Matching types are checked in order of client preference first,
|
||||
and then in the order of the given 'media' values.
|
||||
|
||||
|
||||
Note that this function does not honor accept-params (other than "q").
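
A sketch of calling it from inside a page handler::

    best = accept(media=['application/json', 'text/html'])
    if best == 'application/json':
        body = render_json()   # hypothetical helpers, for illustration only
    else:
        body = render_html()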
|
||||
"""
|
||||
if not media:
|
||||
@@ -522,7 +534,7 @@ def accept(media=None, debug=False):
|
||||
if isinstance(media, basestring):
|
||||
media = [media]
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
# Parse the Accept request header, and try to match one
|
||||
# of the requested media-ranges (in order of preference).
|
||||
ranges = request.headers.elements('Accept')
|
||||
@@ -556,7 +568,7 @@ def accept(media=None, debug=False):
|
||||
cherrypy.log('Match due to %s' % element.value,
|
||||
'TOOLS.ACCEPT')
|
||||
return element.value
|
||||
|
||||
|
||||
# No suitable media-range found.
|
||||
ah = request.headers.get('Accept')
|
||||
if ah is None:
|
||||
@@ -569,22 +581,22 @@ def accept(media=None, debug=False):
|
||||
|
||||
|
||||
class MonitoredHeaderMap(_httputil.HeaderMap):
|
||||
|
||||
|
||||
def __init__(self):
|
||||
self.accessed_headers = set()
|
||||
|
||||
|
||||
def __getitem__(self, key):
|
||||
self.accessed_headers.add(key)
|
||||
return _httputil.HeaderMap.__getitem__(self, key)
|
||||
|
||||
|
||||
def __contains__(self, key):
|
||||
self.accessed_headers.add(key)
|
||||
return _httputil.HeaderMap.__contains__(self, key)
|
||||
|
||||
|
||||
def get(self, key, default=None):
|
||||
self.accessed_headers.add(key)
|
||||
return _httputil.HeaderMap.get(self, key, default=default)
|
||||
|
||||
|
||||
if hasattr({}, 'has_key'):
|
||||
# Python 2
|
||||
def has_key(self, key):
|
||||
@@ -593,25 +605,26 @@ class MonitoredHeaderMap(_httputil.HeaderMap):
|
||||
|
||||
|
||||
def autovary(ignore=None, debug=False):
|
||||
"""Auto-populate the Vary response header based on request.header access."""
|
||||
"""Auto-populate the Vary response header based on request.header access.
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
|
||||
|
||||
req_h = request.headers
|
||||
request.headers = MonitoredHeaderMap()
|
||||
request.headers.update(req_h)
|
||||
if ignore is None:
|
||||
ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])
|
||||
|
||||
|
||||
def set_response_header():
|
||||
resp_h = cherrypy.serving.response.headers
|
||||
v = set([e.value for e in resp_h.elements('Vary')])
|
||||
if debug:
|
||||
cherrypy.log('Accessed headers: %s' % request.headers.accessed_headers,
|
||||
'TOOLS.AUTOVARY')
|
||||
cherrypy.log(
|
||||
'Accessed headers: %s' % request.headers.accessed_headers,
|
||||
'TOOLS.AUTOVARY')
|
||||
v = v.union(request.headers.accessed_headers)
|
||||
v = v.difference(ignore)
|
||||
v = list(v)
|
||||
v.sort()
|
||||
resp_h['Vary'] = ', '.join(v)
|
||||
request.hooks.attach('before_finalize', set_response_header, 95)
|
||||
|
||||
|
||||
@@ -4,24 +4,25 @@ import time
|
||||
import cherrypy
|
||||
from cherrypy._cpcompat import basestring, BytesIO, ntob, set, unicodestr
|
||||
from cherrypy.lib import file_generator
|
||||
from cherrypy.lib import is_closable_iterator
|
||||
from cherrypy.lib import set_vary_header
|
||||
|
||||
|
||||
def decode(encoding=None, default_encoding='utf-8'):
|
||||
"""Replace or extend the list of charsets used to decode a request entity.
|
||||
|
||||
|
||||
Either argument may be a single string or a list of strings.
|
||||
|
||||
|
||||
encoding
|
||||
If not None, restricts the set of charsets attempted while decoding
|
||||
a request entity to the given set (even if a different charset is given in
|
||||
the Content-Type request header).
|
||||
|
||||
a request entity to the given set (even if a different charset is
|
||||
given in the Content-Type request header).
|
||||
|
||||
default_encoding
|
||||
Only in effect if the 'encoding' argument is not given.
|
||||
If given, the set of charsets attempted while decoding a request entity is
|
||||
*extended* with the given value(s).
|
||||
|
||||
If given, the set of charsets attempted while decoding a request
|
||||
entity is *extended* with the given value(s).
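
Config sketch::

    config = {'/api': {'tools.decode.on': True,
                       'tools.decode.encoding': 'utf-8'}}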
|
||||
|
||||
"""
|
||||
body = cherrypy.request.body
|
||||
if encoding is not None:
|
||||
@@ -33,9 +34,34 @@ def decode(encoding=None, default_encoding='utf-8'):
|
||||
default_encoding = [default_encoding]
|
||||
body.attempt_charsets = body.attempt_charsets + default_encoding
|
||||
|
||||
class UTF8StreamEncoder:
|
||||
def __init__(self, iterator):
|
||||
self._iterator = iterator
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def next(self):
|
||||
return self.__next__()
|
||||
|
||||
def __next__(self):
|
||||
res = next(self._iterator)
|
||||
if isinstance(res, unicodestr):
|
||||
res = res.encode('utf-8')
|
||||
return res
|
||||
|
||||
def close(self):
|
||||
if is_closable_iterator(self._iterator):
|
||||
self._iterator.close()
|
||||
|
||||
def __getattr__(self, attr):
|
||||
if attr.startswith('__'):
|
||||
raise AttributeError(self, attr)
|
||||
return getattr(self._iterator, attr)
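
# Usage sketch (illustrative): wrap a body iterator that may yield unicode
# so downstream consumers always receive UTF-8 bytes; close() is forwarded
# to the underlying iterator when it supports one.
#
#   body = UTF8StreamEncoder(iter([u'caf\xe9 ', u'au lait']))
#   b''.join(body)  # -> b'caf\xc3\xa9 au lait'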
|
||||
|
||||
|
||||
class ResponseEncoder:
|
||||
|
||||
|
||||
default_encoding = 'utf-8'
|
||||
failmsg = "Response body could not be encoded with %r."
|
||||
encoding = None
|
||||
@@ -43,11 +69,11 @@ class ResponseEncoder:
|
||||
text_only = True
|
||||
add_charset = True
|
||||
debug = False
|
||||
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
for k, v in kwargs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
|
||||
self.attempted_charsets = set()
|
||||
request = cherrypy.serving.request
|
||||
if request.handler is not None:
|
||||
@@ -56,17 +82,17 @@ class ResponseEncoder:
|
||||
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
|
||||
self.oldhandler = request.handler
|
||||
request.handler = self
|
||||
|
||||
|
||||
def encode_stream(self, encoding):
|
||||
"""Encode a streaming response body.
|
||||
|
||||
|
||||
Use a generator wrapper, and just pray it works as the stream is
|
||||
being written out.
|
||||
"""
|
||||
if encoding in self.attempted_charsets:
|
||||
return False
|
||||
self.attempted_charsets.add(encoding)
|
||||
|
||||
|
||||
def encoder(body):
|
||||
for chunk in body:
|
||||
if isinstance(chunk, unicodestr):
|
||||
@@ -74,31 +100,30 @@ class ResponseEncoder:
|
||||
yield chunk
|
||||
self.body = encoder(self.body)
|
||||
return True
|
||||
|
||||
|
||||
def encode_string(self, encoding):
|
||||
"""Encode a buffered response body."""
|
||||
if encoding in self.attempted_charsets:
|
||||
return False
|
||||
self.attempted_charsets.add(encoding)
|
||||
|
||||
try:
|
||||
body = []
|
||||
for chunk in self.body:
|
||||
if isinstance(chunk, unicodestr):
|
||||
body = []
|
||||
for chunk in self.body:
|
||||
if isinstance(chunk, unicodestr):
|
||||
try:
|
||||
chunk = chunk.encode(encoding, self.errors)
|
||||
body.append(chunk)
|
||||
self.body = body
|
||||
except (LookupError, UnicodeError):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
except (LookupError, UnicodeError):
|
||||
return False
|
||||
body.append(chunk)
|
||||
self.body = body
|
||||
return True
|
||||
|
||||
def find_acceptable_charset(self):
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
if self.debug:
|
||||
cherrypy.log('response.stream %r' % response.stream, 'TOOLS.ENCODE')
|
||||
cherrypy.log('response.stream %r' %
|
||||
response.stream, 'TOOLS.ENCODE')
|
||||
if response.stream:
|
||||
encoder = self.encode_stream
|
||||
else:
|
||||
@@ -115,22 +140,24 @@ class ResponseEncoder:
|
||||
# >>> len(t.encode("utf7"))
|
||||
# 8
|
||||
del response.headers["Content-Length"]
|
||||
|
||||
|
||||
# Parse the Accept-Charset request header, and try to provide one
|
||||
# of the requested charsets (in order of user preference).
|
||||
encs = request.headers.elements('Accept-Charset')
|
||||
charsets = [enc.value.lower() for enc in encs]
|
||||
if self.debug:
|
||||
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
|
||||
|
||||
|
||||
if self.encoding is not None:
|
||||
# If specified, force this encoding to be used, or fail.
|
||||
encoding = self.encoding.lower()
|
||||
if self.debug:
|
||||
cherrypy.log('Specified encoding %r' % encoding, 'TOOLS.ENCODE')
|
||||
cherrypy.log('Specified encoding %r' %
|
||||
encoding, 'TOOLS.ENCODE')
|
||||
if (not charsets) or "*" in charsets or encoding in charsets:
|
||||
if self.debug:
|
||||
cherrypy.log('Attempting encoding %r' % encoding, 'TOOLS.ENCODE')
|
||||
cherrypy.log('Attempting encoding %r' %
|
||||
encoding, 'TOOLS.ENCODE')
|
||||
if encoder(encoding):
|
||||
return encoding
|
||||
else:
|
||||
@@ -142,7 +169,8 @@ class ResponseEncoder:
|
||||
if encoder(self.default_encoding):
|
||||
return self.default_encoding
|
||||
else:
|
||||
raise cherrypy.HTTPError(500, self.failmsg % self.default_encoding)
|
||||
raise cherrypy.HTTPError(500, self.failmsg %
|
||||
self.default_encoding)
|
||||
else:
|
||||
for element in encs:
|
||||
if element.qvalue > 0:
|
||||
@@ -160,7 +188,7 @@ class ResponseEncoder:
|
||||
'0)' % element, 'TOOLS.ENCODE')
|
||||
if encoder(encoding):
|
||||
return encoding
|
||||
|
||||
|
||||
if "*" not in charsets:
|
||||
# If no "*" is present in an Accept-Charset field, then all
|
||||
# character sets not explicitly mentioned get a quality
|
||||
@@ -173,20 +201,21 @@ class ResponseEncoder:
|
||||
'TOOLS.ENCODE')
|
||||
if encoder(iso):
|
||||
return iso
|
||||
|
||||
|
||||
# No suitable encoding found.
|
||||
ac = request.headers.get('Accept-Charset')
|
||||
if ac is None:
|
||||
msg = "Your client did not send an Accept-Charset header."
|
||||
else:
|
||||
msg = "Your client sent this Accept-Charset header: %s." % ac
|
||||
msg += " We tried these charsets: %s." % ", ".join(self.attempted_charsets)
|
||||
_charsets = ", ".join(sorted(self.attempted_charsets))
|
||||
msg += " We tried these charsets: %s." % (_charsets,)
|
||||
raise cherrypy.HTTPError(406, msg)
|
||||
|
||||
|
||||
def __call__(self, *args, **kwargs):
|
||||
response = cherrypy.serving.response
|
||||
self.body = self.oldhandler(*args, **kwargs)
|
||||
|
||||
|
||||
if isinstance(self.body, basestring):
|
||||
# strings get wrapped in a list because iterating over a single
|
||||
# item list is much faster than iterating over every character
|
||||
@@ -200,46 +229,49 @@ class ResponseEncoder:
|
||||
self.body = file_generator(self.body)
|
||||
elif self.body is None:
|
||||
self.body = []
|
||||
|
||||
|
||||
ct = response.headers.elements("Content-Type")
|
||||
if self.debug:
|
||||
cherrypy.log('Content-Type: %r' % [str(h) for h in ct], 'TOOLS.ENCODE')
|
||||
if ct:
|
||||
cherrypy.log('Content-Type: %r' % [str(h)
|
||||
for h in ct], 'TOOLS.ENCODE')
|
||||
if ct and self.add_charset:
|
||||
ct = ct[0]
|
||||
if self.text_only:
|
||||
if ct.value.lower().startswith("text/"):
|
||||
if self.debug:
|
||||
cherrypy.log('Content-Type %s starts with "text/"' % ct,
|
||||
'TOOLS.ENCODE')
|
||||
cherrypy.log(
|
||||
'Content-Type %s starts with "text/"' % ct,
|
||||
'TOOLS.ENCODE')
|
||||
do_find = True
|
||||
else:
|
||||
if self.debug:
|
||||
cherrypy.log('Not finding because Content-Type %s does '
|
||||
'not start with "text/"' % ct,
|
||||
cherrypy.log('Not finding because Content-Type %s '
|
||||
'does not start with "text/"' % ct,
|
||||
'TOOLS.ENCODE')
|
||||
do_find = False
|
||||
else:
|
||||
if self.debug:
|
||||
cherrypy.log('Finding because not text_only', 'TOOLS.ENCODE')
|
||||
cherrypy.log('Finding because not text_only',
|
||||
'TOOLS.ENCODE')
|
||||
do_find = True
|
||||
|
||||
|
||||
if do_find:
|
||||
# Set "charset=..." param on response Content-Type header
|
||||
ct.params['charset'] = self.find_acceptable_charset()
|
||||
if self.add_charset:
|
||||
if self.debug:
|
||||
cherrypy.log('Setting Content-Type %s' % ct,
|
||||
'TOOLS.ENCODE')
|
||||
response.headers["Content-Type"] = str(ct)
|
||||
|
||||
if self.debug:
|
||||
cherrypy.log('Setting Content-Type %s' % ct,
|
||||
'TOOLS.ENCODE')
|
||||
response.headers["Content-Type"] = str(ct)
|
||||
|
||||
return self.body
|
||||
|
||||
# GZIP
|
||||
|
||||
|
||||
def compress(body, compress_level):
|
||||
"""Compress 'body' at the given compress_level."""
|
||||
import zlib
|
||||
|
||||
|
||||
# See http://www.gzip.org/zlib/rfc-gzip.html
|
||||
yield ntob('\x1f\x8b') # ID1 and ID2: gzip marker
|
||||
yield ntob('\x08') # CM: compression method
|
||||
@@ -248,7 +280,7 @@ def compress(body, compress_level):
|
||||
yield struct.pack("<L", int(time.time()) & int('FFFFFFFF', 16))
|
||||
yield ntob('\x02') # XFL: max compression, slowest algo
|
||||
yield ntob('\xff') # OS: unknown
|
||||
|
||||
|
||||
crc = zlib.crc32(ntob(""))
|
||||
size = 0
|
||||
zobj = zlib.compressobj(compress_level,
|
||||
@@ -259,15 +291,16 @@ def compress(body, compress_level):
|
||||
crc = zlib.crc32(line, crc)
|
||||
yield zobj.compress(line)
|
||||
yield zobj.flush()
|
||||
|
||||
|
||||
# CRC32: 4 bytes
|
||||
yield struct.pack("<L", crc & int('FFFFFFFF', 16))
|
||||
# ISIZE: 4 bytes
|
||||
yield struct.pack("<L", size & int('FFFFFFFF', 16))
|
||||
|
||||
|
||||
def decompress(body):
|
||||
import gzip
|
||||
|
||||
|
||||
zbuf = BytesIO()
|
||||
zbuf.write(body)
|
||||
zbuf.seek(0)
|
||||
@@ -277,9 +310,10 @@ def decompress(body):
|
||||
return data
|
||||
|
||||
|
||||
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'],
|
||||
debug=False):
|
||||
"""Try to gzip the response body if Content-Type in mime_types.
|
||||
|
||||
|
||||
cherrypy.response.headers['Content-Type'] must be set to one of the
|
||||
values in the mime_types arg before calling this function.
|
||||
|
||||
@@ -287,32 +321,32 @@ def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
* type/subtype
|
||||
* type/*
|
||||
* type/*+subtype
|
||||
|
||||
|
||||
No compression is performed if any of the following hold:
|
||||
* The client sends no Accept-Encoding request header
|
||||
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
|
||||
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
|
||||
* The 'identity' value is given with a qvalue > 0.
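
A config sketch::

    config = {'/': {'tools.gzip.on': True,
                    'tools.gzip.compress_level': 6,
                    'tools.gzip.mime_types': ['text/html', 'text/plain',
                                              'application/json']}}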
|
||||
|
||||
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
set_vary_header(response, "Accept-Encoding")
|
||||
|
||||
|
||||
if not response.body:
|
||||
# Response body is empty (might be a 304 for instance)
|
||||
if debug:
|
||||
cherrypy.log('No response body', context='TOOLS.GZIP')
|
||||
return
|
||||
|
||||
|
||||
# If returning cached content (which should already have been gzipped),
|
||||
# don't re-zip.
|
||||
if getattr(request, "cached", False):
|
||||
if debug:
|
||||
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
|
||||
return
|
||||
|
||||
|
||||
acceptable = request.headers.elements('Accept-Encoding')
|
||||
if not acceptable:
|
||||
# If no Accept-Encoding field is present in a request,
|
||||
@@ -325,7 +359,7 @@ def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
if debug:
|
||||
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
|
||||
return
|
||||
|
||||
|
||||
ct = response.headers.get('Content-Type', '').split(';')[0]
|
||||
for coding in acceptable:
|
||||
if coding.value == 'identity' and coding.qvalue != 0:
|
||||
@@ -339,7 +373,7 @@ def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
cherrypy.log('Zero gzip qvalue: %s' % coding,
|
||||
context='TOOLS.GZIP')
|
||||
return
|
||||
|
||||
|
||||
if ct not in mime_types:
|
||||
# If the list of provided mime-types contains tokens
|
||||
# such as 'text/*' or 'application/*+xml',
|
||||
@@ -370,7 +404,7 @@ def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
cherrypy.log('Content-Type %s not in mime_types %r' %
|
||||
(ct, mime_types), context='TOOLS.GZIP')
|
||||
return
|
||||
|
||||
|
||||
if debug:
|
||||
cherrypy.log('Gzipping', context='TOOLS.GZIP')
|
||||
# Return a generator that compresses the page
|
||||
@@ -379,10 +413,9 @@ def gzip(compress_level=5, mime_types=['text/html', 'text/plain'], debug=False):
|
||||
if "Content-Length" in response.headers:
|
||||
# Delete Content-Length header so finalize() recalcs it.
|
||||
del response.headers["Content-Length"]
|
||||
|
||||
|
||||
return
|
||||
|
||||
|
||||
if debug:
|
||||
cherrypy.log('No acceptable encoding found.', context='GZIP')
|
||||
cherrypy.HTTPError(406, "identity, gzip").set_response()
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import inspect
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
|
||||
try:
|
||||
import objgraph
|
||||
except ImportError:
|
||||
@@ -15,6 +15,7 @@ from cherrypy.process.plugins import SimplePlugin
|
||||
|
||||
|
||||
class ReferrerTree(object):
|
||||
|
||||
"""An object which gathers all referrers of an object to a given depth."""
|
||||
|
||||
peek_length = 40
|
||||
@@ -89,6 +90,7 @@ class ReferrerTree(object):
|
||||
def format(self, tree):
|
||||
"""Return a list of string reprs from a nested list of referrers."""
|
||||
output = []
|
||||
|
||||
def ascend(branch, depth=1):
|
||||
for parent, grandparents in branch:
|
||||
output.append((" " * depth) + self._format(parent))
|
||||
@@ -103,15 +105,15 @@ def get_instances(cls):
|
||||
|
||||
|
||||
class RequestCounter(SimplePlugin):
|
||||
|
||||
|
||||
def start(self):
|
||||
self.count = 0
|
||||
|
||||
|
||||
def before_request(self):
|
||||
self.count += 1
|
||||
|
||||
|
||||
def after_request(self):
|
||||
self.count -=1
|
||||
self.count -= 1
|
||||
request_counter = RequestCounter(cherrypy.engine)
|
||||
request_counter.subscribe()
|
||||
|
||||
@@ -129,15 +131,17 @@ def get_context(obj):
|
||||
|
||||
|
||||
class GCRoot(object):
|
||||
|
||||
"""A CherryPy page handler for testing reference leaks."""
|
||||
|
||||
classes = [(_cprequest.Request, 2, 2,
|
||||
"Should be 1 in this request thread and 1 in the main thread."),
|
||||
(_cprequest.Response, 2, 2,
|
||||
"Should be 1 in this request thread and 1 in the main thread."),
|
||||
(_cpwsgi.AppResponse, 1, 1,
|
||||
"Should be 1 in this request thread only."),
|
||||
]
|
||||
classes = [
|
||||
(_cprequest.Request, 2, 2,
|
||||
"Should be 1 in this request thread and 1 in the main thread."),
|
||||
(_cprequest.Response, 2, 2,
|
||||
"Should be 1 in this request thread and 1 in the main thread."),
|
||||
(_cpwsgi.AppResponse, 1, 1,
|
||||
"Should be 1 in this request thread only."),
|
||||
]
|
||||
|
||||
def index(self):
|
||||
return "Hello, world!"
|
||||
@@ -145,14 +149,14 @@ class GCRoot(object):
|
||||
|
||||
def stats(self):
|
||||
output = ["Statistics:"]
|
||||
|
||||
|
||||
for trial in range(10):
|
||||
if request_counter.count > 0:
|
||||
break
|
||||
time.sleep(0.5)
|
||||
else:
|
||||
output.append("\nNot all requests closed properly.")
|
||||
|
||||
|
||||
# gc_collect isn't perfectly synchronous, because it may
|
||||
# break reference cycles that then take time to fully
|
||||
# finalize. Call it thrice and hope for the best.
|
||||
@@ -208,7 +212,6 @@ class GCRoot(object):
|
||||
t = ReferrerTree(ignore=[objs], maxdepth=3)
|
||||
tree = t.ascend(obj)
|
||||
output.extend(t.format(tree))
|
||||
|
||||
|
||||
return "\n".join(output)
|
||||
stats.exposed = True
|
||||
|
||||
|
||||
@@ -4,4 +4,3 @@ warnings.warn('cherrypy.lib.http has been deprecated and will be removed '
|
||||
DeprecationWarning)
|
||||
|
||||
from cherrypy.lib.httputil import *
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""
|
||||
This module defines functions to implement HTTP Digest Authentication (:rfc:`2617`).
|
||||
This module defines functions to implement HTTP Digest Authentication
|
||||
(:rfc:`2617`).
|
||||
This has full compliance with 'Digest' and 'Basic' authentication methods. In
|
||||
'Digest' it supports both MD5 and MD5-sess algorithms.
|
||||
|
||||
@@ -7,13 +8,14 @@ Usage:
|
||||
First use 'doAuth' to request the client authentication for a
|
||||
certain resource. You should send an httplib.UNAUTHORIZED response to the
|
||||
client so it knows it has to authenticate itself.
|
||||
|
||||
|
||||
Then use 'parseAuthorization' to retrieve the 'auth_map' used in
|
||||
'checkResponse'.
|
||||
|
||||
To use 'checkResponse' you must have already verified the password associated
|
||||
with the 'username' key in 'auth_map' dict. Then you use the 'checkResponse'
|
||||
function to verify if the password matches the one sent by the client.
|
||||
To use 'checkResponse' you must have already verified the password
|
||||
associated with the 'username' key in 'auth_map' dict. Then you use the
|
||||
'checkResponse' function to verify if the password matches the one sent
|
||||
by the client.
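
A minimal flow sketch using the names above ('request'/'response' stand in
for your framework's objects, and the stored password lookup is your own)::

    # Challenge the client:
    response.headers['WWW-Authenticate'] = doAuth('myrealm')
    # ... then, on the authenticated retry:
    auth_map = parseAuthorization(request.headers['Authorization'])
    if auth_map and checkResponse(auth_map, stored_password,
                                  method=request.method):
        pass  # credentials check out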
|
||||
|
||||
SUPPORTED_ALGORITHM - list of supported 'Digest' algorithms
|
||||
SUPPORTED_QOP - list of supported 'Digest' 'qop'.
|
||||
@@ -21,7 +23,8 @@ SUPPORTED_QOP - list of supported 'Digest' 'qop'.
|
||||
__version__ = 1, 0, 1
|
||||
__author__ = "Tiago Cogumbreiro <cogumbreiro@users.sf.net>"
|
||||
__credits__ = """
|
||||
Peter van Kampen for its recipe which implement most of Digest authentication:
|
||||
Peter van Kampen for his recipe which implements most of Digest
|
||||
authentication:
|
||||
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/302378
|
||||
"""
|
||||
|
||||
@@ -29,27 +32,27 @@ __license__ = """
|
||||
Copyright (c) 2005, Tiago Cogumbreiro <cogumbreiro@users.sf.net>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
* Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of Sylvain Hellegouarch nor the names of his contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
* Neither the name of Sylvain Hellegouarch nor the names of his
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
"""
|
||||
|
||||
@@ -57,7 +60,7 @@ __all__ = ("digestAuth", "basicAuth", "doAuth", "checkResponse",
|
||||
"parseAuthorization", "SUPPORTED_ALGORITHM", "md5SessionKey",
|
||||
"calculateNonce", "SUPPORTED_QOP")
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
import time
|
||||
from cherrypy._cpcompat import base64_decode, ntob, md5
|
||||
from cherrypy._cpcompat import parse_http_list, parse_keqv_list
|
||||
@@ -70,16 +73,17 @@ AUTH_INT = "auth-int"
|
||||
SUPPORTED_ALGORITHM = (MD5, MD5_SESS)
|
||||
SUPPORTED_QOP = (AUTH, AUTH_INT)
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
# doAuth
|
||||
#
|
||||
DIGEST_AUTH_ENCODERS = {
|
||||
MD5: lambda val: md5(ntob(val)).hexdigest(),
|
||||
MD5_SESS: lambda val: md5(ntob(val)).hexdigest(),
|
||||
# SHA: lambda val: sha.new(ntob(val)).hexdigest (),
|
||||
# SHA: lambda val: sha.new(ntob(val)).hexdigest (),
|
||||
}
|
||||
|
||||
def calculateNonce (realm, algorithm = MD5):
|
||||
|
||||
def calculateNonce(realm, algorithm=MD5):
|
||||
"""This is an auxaliary function that calculates 'nonce' value. It is used
|
||||
to handle sessions."""
|
||||
|
||||
@@ -89,44 +93,47 @@ def calculateNonce (realm, algorithm = MD5):
|
||||
try:
|
||||
encoder = DIGEST_AUTH_ENCODERS[algorithm]
|
||||
except KeyError:
|
||||
raise NotImplementedError ("The chosen algorithm (%s) does not have "\
|
||||
"an implementation yet" % algorithm)
|
||||
raise NotImplementedError("The chosen algorithm (%s) does not have "
|
||||
"an implementation yet" % algorithm)
|
||||
|
||||
return encoder ("%d:%s" % (time.time(), realm))
|
||||
return encoder("%d:%s" % (time.time(), realm))
|
||||
|
||||
def digestAuth (realm, algorithm = MD5, nonce = None, qop = AUTH):
|
||||
|
||||
def digestAuth(realm, algorithm=MD5, nonce=None, qop=AUTH):
|
||||
"""Challenges the client for a Digest authentication."""
|
||||
global SUPPORTED_ALGORITHM, DIGEST_AUTH_ENCODERS, SUPPORTED_QOP
|
||||
assert algorithm in SUPPORTED_ALGORITHM
|
||||
assert qop in SUPPORTED_QOP
|
||||
|
||||
if nonce is None:
|
||||
nonce = calculateNonce (realm, algorithm)
|
||||
nonce = calculateNonce(realm, algorithm)
|
||||
|
||||
return 'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"' % (
|
||||
realm, nonce, algorithm, qop
|
||||
)
|
||||
|
||||
def basicAuth (realm):
|
||||
|
||||
def basicAuth(realm):
|
||||
"""Challengenes the client for a Basic authentication."""
|
||||
assert '"' not in realm, "Realms cannot contain the \" (quote) character."
|
||||
|
||||
return 'Basic realm="%s"' % realm
|
||||
|
||||
def doAuth (realm):
|
||||
|
||||
def doAuth(realm):
|
||||
"""'doAuth' function returns the challenge string b giving priority over
|
||||
Digest and fallback to Basic authentication when the browser doesn't
|
||||
support the first one.
|
||||
|
||||
|
||||
This should be set in the HTTP header under the key 'WWW-Authenticate'."""
|
||||
|
||||
return digestAuth (realm) + " " + basicAuth (realm)
|
||||
return digestAuth(realm) + " " + basicAuth(realm)
|
||||
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
# Parse authorization parameters
|
||||
#
|
||||
def _parseDigestAuthorization (auth_params):
|
||||
def _parseDigestAuthorization(auth_params):
|
||||
# Convert the auth params to a dict
|
||||
items = parse_http_list(auth_params)
|
||||
params = parse_keqv_list(items)
|
||||
@@ -140,8 +147,8 @@ def _parseDigestAuthorization (auth_params):
|
||||
return None
|
||||
|
||||
# If qop is sent then cnonce and nc MUST be present
|
||||
if "qop" in params and not ("cnonce" in params \
|
||||
and "nc" in params):
|
||||
if "qop" in params and not ("cnonce" in params
|
||||
and "nc" in params):
|
||||
return None
|
||||
|
||||
# If qop is not sent, neither cnonce nor nc can be present
|
||||
@@ -152,7 +159,7 @@ def _parseDigestAuthorization (auth_params):
|
||||
return params
|
||||
|
||||
|
||||
def _parseBasicAuthorization (auth_params):
|
||||
def _parseBasicAuthorization(auth_params):
|
||||
username, password = base64_decode(auth_params).split(":", 1)
|
||||
return {"username": username, "password": password}
|
||||
|
||||
@@ -161,18 +168,19 @@ AUTH_SCHEMES = {
|
||||
"digest": _parseDigestAuthorization,
|
||||
}
|
||||
|
||||
def parseAuthorization (credentials):
|
||||
|
||||
def parseAuthorization(credentials):
|
||||
"""parseAuthorization will convert the value of the 'Authorization' key in
|
||||
the HTTP header to a map itself. If the parsing fails 'None' is returned.
|
||||
"""
|
||||
|
||||
global AUTH_SCHEMES
|
||||
|
||||
auth_scheme, auth_params = credentials.split(" ", 1)
|
||||
auth_scheme = auth_scheme.lower ()
|
||||
auth_scheme, auth_params = credentials.split(" ", 1)
|
||||
auth_scheme = auth_scheme.lower()
|
||||
|
||||
parser = AUTH_SCHEMES[auth_scheme]
|
||||
params = parser (auth_params)
|
||||
params = parser(auth_params)
|
||||
|
||||
if params is None:
|
||||
return
|
||||
@@ -182,12 +190,12 @@ def parseAuthorization (credentials):
|
||||
return params
|
||||
|
||||
|
||||
################################################################################
|
||||
##########################################################################
|
||||
# Check provided response for a valid password
|
||||
#
|
||||
def md5SessionKey (params, password):
|
||||
def md5SessionKey(params, password):
|
||||
"""
|
||||
If the "algorithm" directive's value is "MD5-sess", then A1
|
||||
If the "algorithm" directive's value is "MD5-sess", then A1
|
||||
[the session key] is calculated only once - on the first request by the
|
||||
client following receipt of a WWW-Authenticate challenge from the server.
|
||||
|
||||
@@ -210,10 +218,11 @@ def md5SessionKey (params, password):
|
||||
params_copy[key] = params[key]
|
||||
|
||||
params_copy["algorithm"] = MD5_SESS
|
||||
return _A1 (params_copy, password)
|
||||
return _A1(params_copy, password)
|
||||
|
||||
|
||||
def _A1(params, password):
|
||||
algorithm = params.get ("algorithm", MD5)
|
||||
algorithm = params.get("algorithm", MD5)
|
||||
H = DIGEST_AUTH_ENCODERS[algorithm]
|
||||
|
||||
if algorithm == MD5:
|
||||
@@ -227,7 +236,7 @@ def _A1(params, password):
|
||||
# This is A1 if qop is set
|
||||
# A1 = H( unq(username-value) ":" unq(realm-value) ":" passwd )
|
||||
# ":" unq(nonce-value) ":" unq(cnonce-value)
|
||||
h_a1 = H ("%s:%s:%s" % (params["username"], params["realm"], password))
|
||||
h_a1 = H("%s:%s:%s" % (params["username"], params["realm"], password))
|
||||
return "%s:%s:%s" % (h_a1, params["nonce"], params["cnonce"])
|
||||
|
||||
|
||||
@@ -235,13 +244,13 @@ def _A2(params, method, kwargs):
|
||||
# If the "qop" directive's value is "auth" or is unspecified, then A2 is:
|
||||
# A2 = Method ":" digest-uri-value
|
||||
|
||||
qop = params.get ("qop", "auth")
|
||||
qop = params.get("qop", "auth")
|
||||
if qop == "auth":
|
||||
return method + ":" + params["uri"]
|
||||
elif qop == "auth-int":
|
||||
# If the "qop" value is "auth-int", then A2 is:
|
||||
# A2 = Method ":" digest-uri-value ":" H(entity-body)
|
||||
entity_body = kwargs.get ("entity_body", "")
|
||||
entity_body = kwargs.get("entity_body", "")
|
||||
H = kwargs["H"]
|
||||
|
||||
return "%s:%s:%s" % (
|
||||
@@ -251,20 +260,22 @@ def _A2(params, method, kwargs):
|
||||
)
|
||||
|
||||
else:
|
||||
raise NotImplementedError ("The 'qop' method is unknown: %s" % qop)
|
||||
raise NotImplementedError("The 'qop' method is unknown: %s" % qop)
|
||||
|
||||
def _computeDigestResponse(auth_map, password, method = "GET", A1 = None,**kwargs):
|
||||
|
||||
def _computeDigestResponse(auth_map, password, method="GET", A1=None,
|
||||
**kwargs):
|
||||
"""
|
||||
Generates a response respecting the algorithm defined in RFC 2617
|
||||
"""
|
||||
params = auth_map
|
||||
|
||||
algorithm = params.get ("algorithm", MD5)
|
||||
algorithm = params.get("algorithm", MD5)
|
||||
|
||||
H = DIGEST_AUTH_ENCODERS[algorithm]
|
||||
KD = lambda secret, data: H(secret + ":" + data)
|
||||
|
||||
qop = params.get ("qop", None)
|
||||
qop = params.get("qop", None)
|
||||
|
||||
H_A2 = H(_A2(params, method, kwargs))
|
||||
|
||||
@@ -297,7 +308,8 @@ def _computeDigestResponse(auth_map, password, method = "GET", A1 = None,**kwarg
|
||||
|
||||
return KD(H_A1, request)
|
||||
|
||||
def _checkDigestResponse(auth_map, password, method = "GET", A1 = None, **kwargs):
|
||||
|
||||
def _checkDigestResponse(auth_map, password, method="GET", A1=None, **kwargs):
|
||||
"""This function is used to verify the response given by the client when
|
||||
he tries to authenticate.
|
||||
Optional arguments:
|
||||
@@ -312,43 +324,48 @@ def _checkDigestResponse(auth_map, password, method = "GET", A1 = None, **kwargs
|
||||
if auth_map['realm'] != kwargs.get('realm', None):
|
||||
return False
|
||||
|
||||
response = _computeDigestResponse(auth_map, password, method, A1,**kwargs)
|
||||
response = _computeDigestResponse(
|
||||
auth_map, password, method, A1, **kwargs)
|
||||
|
||||
return response == auth_map["response"]
|
||||
|
||||
def _checkBasicResponse (auth_map, password, method='GET', encrypt=None, **kwargs):
|
||||
|
||||
def _checkBasicResponse(auth_map, password, method='GET', encrypt=None,
|
||||
**kwargs):
|
||||
# Note that the Basic response doesn't provide the realm value so we cannot
|
||||
# test it
|
||||
pass_through = lambda password, username=None: password
|
||||
encrypt = encrypt or pass_through
|
||||
try:
|
||||
return encrypt(auth_map["password"], auth_map["username"]) == password
|
||||
candidate = encrypt(auth_map["password"], auth_map["username"])
|
||||
except TypeError:
|
||||
return encrypt(auth_map["password"]) == password
|
||||
# if encrypt only takes one parameter, it's the password
|
||||
candidate = encrypt(auth_map["password"])
|
||||
return candidate == password
|
||||
|
||||
AUTH_RESPONSES = {
|
||||
"basic": _checkBasicResponse,
|
||||
"digest": _checkDigestResponse,
|
||||
}
|
||||
|
||||
def checkResponse (auth_map, password, method = "GET", encrypt=None, **kwargs):
|
||||
|
||||
def checkResponse(auth_map, password, method="GET", encrypt=None, **kwargs):
|
||||
"""'checkResponse' compares the auth_map with the password and optionally
|
||||
other arguments that each implementation might need.
|
||||
|
||||
|
||||
If the response is of type 'Basic' then the function has the following
|
||||
signature::
|
||||
|
||||
checkBasicResponse (auth_map, password) -> bool
|
||||
|
||||
|
||||
checkBasicResponse(auth_map, password) -> bool
|
||||
|
||||
If the response is of type 'Digest' then the function has the following
|
||||
signature::
|
||||
|
||||
checkDigestResponse (auth_map, password, method = 'GET', A1 = None) -> bool
|
||||
|
||||
|
||||
checkDigestResponse(auth_map, password, method='GET', A1=None) -> bool
|
||||
|
||||
The 'A1' argument is only used in MD5_SESS algorithm based responses.
|
||||
Check md5SessionKey() for more info.
|
||||
"""
|
||||
checker = AUTH_RESPONSES[auth_map["auth_scheme"]]
|
||||
return checker (auth_map, password, method=method, encrypt=encrypt, **kwargs)
|
||||
|
||||
|
||||
|
||||
|
||||
return checker(auth_map, password, method=method, encrypt=encrypt,
|
||||
**kwargs)
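
Sketch only, not part of the diff: verifying a Basic Authorization header with parseAuthorization() and checkResponse(). The credentials are made up, and the auth_scheme key is set defensively in case the surrounding (unshown) parseAuthorization() lines do not add it.

    import base64
    from cherrypy.lib import httpauth

    header = "Basic " + base64.b64encode(b"alice:secret").decode("ascii")
    auth_map = httpauth.parseAuthorization(header)
    # parseAuthorization() normally records the scheme itself; set it here
    # only to keep the sketch self-contained.
    auth_map.setdefault("auth_scheme", "basic")
    assert httpauth.checkResponse(auth_map, "secret")
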
|
||||
|
||||
@@ -8,27 +8,27 @@ to a public caning.
|
||||
"""
|
||||
|
||||
from binascii import b2a_base64
|
||||
from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou, reversed, sorted
|
||||
from cherrypy._cpcompat import basestring, bytestr, iteritems, nativestr, unicodestr, unquote_qs
|
||||
from cherrypy._cpcompat import BaseHTTPRequestHandler, HTTPDate, ntob, ntou
|
||||
from cherrypy._cpcompat import basestring, bytestr, iteritems, nativestr
|
||||
from cherrypy._cpcompat import reversed, sorted, unicodestr, unquote_qs
|
||||
response_codes = BaseHTTPRequestHandler.responses.copy()
|
||||
|
||||
# From http://www.cherrypy.org/ticket/361
|
||||
# From https://bitbucket.org/cherrypy/cherrypy/issue/361
|
||||
response_codes[500] = ('Internal Server Error',
|
||||
'The server encountered an unexpected condition '
|
||||
'which prevented it from fulfilling the request.')
|
||||
'The server encountered an unexpected condition '
|
||||
'which prevented it from fulfilling the request.')
|
||||
response_codes[503] = ('Service Unavailable',
|
||||
'The server is currently unable to handle the '
|
||||
'request due to a temporary overloading or '
|
||||
'maintenance of the server.')
|
||||
'The server is currently unable to handle the '
|
||||
'request due to a temporary overloading or '
|
||||
'maintenance of the server.')
|
||||
|
||||
import re
|
||||
import urllib
|
||||
|
||||
|
||||
|
||||
def urljoin(*atoms):
|
||||
"""Return the given path \*atoms, joined into a single URL.
|
||||
|
||||
|
||||
This will correctly join a SCRIPT_NAME and PATH_INFO into the
|
||||
original URL, even if either atom is blank.
|
||||
"""
|
||||
@@ -38,9 +38,10 @@ def urljoin(*atoms):
|
||||
# Special-case the final url of "", and return "/" instead.
|
||||
return url or "/"
|
||||
|
||||
|
||||
def urljoin_bytes(*atoms):
|
||||
"""Return the given path *atoms, joined into a single URL.
|
||||
|
||||
|
||||
This will correctly join a SCRIPT_NAME and PATH_INFO into the
|
||||
original URL, even if either atom is blank.
|
||||
"""
|
||||
@@ -50,24 +51,26 @@ def urljoin_bytes(*atoms):
|
||||
# Special-case the final url of "", and return "/" instead.
|
||||
return url or ntob("/")
|
||||
|
||||
|
||||
def protocol_from_http(protocol_str):
|
||||
"""Return a protocol tuple from the given 'HTTP/x.y' string."""
|
||||
return int(protocol_str[5]), int(protocol_str[7])
|
||||
|
||||
|
||||
def get_ranges(headervalue, content_length):
|
||||
"""Return a list of (start, stop) indices from a Range header, or None.
|
||||
|
||||
|
||||
Each (start, stop) tuple will be composed of two ints, which are suitable
|
||||
for use in a slicing operation. That is, the header "Range: bytes=3-6",
|
||||
if applied against a Python string, is requesting resource[3:7]. This
|
||||
function will return the list [(3, 7)].
|
||||
|
||||
|
||||
If this function returns an empty list, you should return HTTP 416.
|
||||
"""
|
||||
|
||||
|
||||
if not headervalue:
|
||||
return None
|
||||
|
||||
|
||||
result = []
|
||||
bytesunit, byteranges = headervalue.split("=", 1)
|
||||
for brange in byteranges.split(","):
|
||||
@@ -100,36 +103,44 @@ def get_ranges(headervalue, content_length):
|
||||
# See rfc quote above.
|
||||
return None
|
||||
# Negative subscript (last N bytes)
|
||||
result.append((content_length - int(stop), content_length))
|
||||
|
||||
#
|
||||
# RFC 2616 Section 14.35.1:
|
||||
# If the entity is shorter than the specified suffix-length,
|
||||
# the entire entity-body is used.
|
||||
if int(stop) > content_length:
|
||||
result.append((0, content_length))
|
||||
else:
|
||||
result.append((content_length - int(stop), content_length))
|
||||
|
||||
return result
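
An illustrative check, not part of the diff, of the Range parsing described above, assuming the function is importable from cherrypy.lib.httputil.

    from cherrypy.lib import httputil

    # "bytes=3-6" against an 8-byte resource selects resource[3:7].
    assert httputil.get_ranges("bytes=3-6", 8) == [(3, 7)]
    # Per the new branch added above, a suffix range longer than the entity
    # returns the whole body.
    assert httputil.get_ranges("bytes=-100", 8) == [(0, 8)]
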
|
||||
|
||||
|
||||
class HeaderElement(object):
|
||||
|
||||
"""An element (with parameters) from an HTTP header's element list."""
|
||||
|
||||
|
||||
def __init__(self, value, params=None):
|
||||
self.value = value
|
||||
if params is None:
|
||||
params = {}
|
||||
self.params = params
|
||||
|
||||
|
||||
def __cmp__(self, other):
|
||||
return cmp(self.value, other.value)
|
||||
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.value < other.value
|
||||
|
||||
|
||||
def __str__(self):
|
||||
p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
|
||||
return "%s%s" % (self.value, "".join(p))
|
||||
|
||||
return str("%s%s" % (self.value, "".join(p)))
|
||||
|
||||
def __bytes__(self):
|
||||
return ntob(self.__str__())
|
||||
|
||||
def __unicode__(self):
|
||||
return ntou(self.__str__())
|
||||
|
||||
|
||||
def parse(elementstr):
|
||||
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
|
||||
# Split the element into a value and parameters. The 'value' may
|
||||
@@ -150,7 +161,7 @@ class HeaderElement(object):
|
||||
params[key] = val
|
||||
return initial_value, params
|
||||
parse = staticmethod(parse)
|
||||
|
||||
|
||||
def from_str(cls, elementstr):
|
||||
"""Construct an instance from a string of the form 'token;key=val'."""
|
||||
ival, params = cls.parse(elementstr)
|
||||
@@ -160,16 +171,18 @@ class HeaderElement(object):
|
||||
|
||||
q_separator = re.compile(r'; *q *=')
|
||||
|
||||
|
||||
class AcceptElement(HeaderElement):
|
||||
|
||||
"""An element (with parameters) from an Accept* header's element list.
|
||||
|
||||
|
||||
AcceptElement objects are comparable; the more-preferred object will be
|
||||
"less than" the less-preferred object. They are also therefore sortable;
|
||||
if you sort a list of AcceptElement objects, they will be listed in
|
||||
priority order; the most preferred value will be first. Yes, it should
|
||||
have been the other way around, but it's too late to fix now.
|
||||
"""
|
||||
|
||||
|
||||
def from_str(cls, elementstr):
|
||||
qvalue = None
|
||||
# The first "q" parameter (if any) separates the initial
|
||||
@@ -180,48 +193,50 @@ class AcceptElement(HeaderElement):
|
||||
# The qvalue for an Accept header can have extensions. The other
|
||||
# headers cannot, but it's easier to parse them as if they did.
|
||||
qvalue = HeaderElement.from_str(atoms[0].strip())
|
||||
|
||||
|
||||
media_type, params = cls.parse(media_range)
|
||||
if qvalue is not None:
|
||||
params["q"] = qvalue
|
||||
return cls(media_type, params)
|
||||
from_str = classmethod(from_str)
|
||||
|
||||
|
||||
def qvalue(self):
|
||||
val = self.params.get("q", "1")
|
||||
if isinstance(val, HeaderElement):
|
||||
val = val.value
|
||||
return float(val)
|
||||
qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
|
||||
|
||||
|
||||
def __cmp__(self, other):
|
||||
diff = cmp(self.qvalue, other.qvalue)
|
||||
if diff == 0:
|
||||
diff = cmp(str(self), str(other))
|
||||
return diff
|
||||
|
||||
|
||||
def __lt__(self, other):
|
||||
if self.qvalue == other.qvalue:
|
||||
return str(self) < str(other)
|
||||
else:
|
||||
return self.qvalue < other.qvalue
|
||||
|
||||
|
||||
RE_HEADER_SPLIT = re.compile(',(?=(?:[^"]*"[^"]*")*[^"]*$)')
|
||||
def header_elements(fieldname, fieldvalue):
|
||||
"""Return a sorted HeaderElement list from a comma-separated header string."""
|
||||
"""Return a sorted HeaderElement list from a comma-separated header string.
|
||||
"""
|
||||
if not fieldvalue:
|
||||
return []
|
||||
|
||||
|
||||
result = []
|
||||
for element in fieldvalue.split(","):
|
||||
for element in RE_HEADER_SPLIT.split(fieldvalue):
|
||||
if fieldname.startswith("Accept") or fieldname == 'TE':
|
||||
hv = AcceptElement.from_str(element)
|
||||
else:
|
||||
hv = HeaderElement.from_str(element)
|
||||
result.append(hv)
|
||||
|
||||
|
||||
return list(reversed(sorted(result)))
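
A small sketch, not part of the diff, of header_elements() sorting an Accept header; it assumes cherrypy.lib.httputil is importable.

    from cherrypy.lib import httputil

    elements = httputil.header_elements(
        "Accept", "text/plain;q=0.5, text/html, text/x-dvi;q=0.8")
    # After reversed(sorted(...)), the most preferred media type comes first.
    assert [e.value for e in elements] == [
        "text/html", "text/x-dvi", "text/plain"]
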
|
||||
|
||||
|
||||
def decode_TEXT(value):
|
||||
r"""Decode :rfc:`2047` TEXT (e.g. "=?utf-8?q?f=C3=BCr?=" -> "f\xfcr")."""
|
||||
try:
|
||||
@@ -237,18 +252,19 @@ def decode_TEXT(value):
|
||||
decodedvalue += atom
|
||||
return decodedvalue
|
||||
|
||||
|
||||
def valid_status(status):
|
||||
"""Return legal HTTP status Code, Reason-phrase and Message.
|
||||
|
||||
|
||||
The status arg must be an int, or a str that begins with an int.
|
||||
|
||||
|
||||
If status is an int, or a str and no reason-phrase is supplied,
|
||||
a default reason-phrase will be provided.
|
||||
"""
|
||||
|
||||
|
||||
if not status:
|
||||
status = 200
|
||||
|
||||
|
||||
status = str(status)
|
||||
parts = status.split(" ", 1)
|
||||
if len(parts) == 1:
|
||||
@@ -258,26 +274,26 @@ def valid_status(status):
|
||||
else:
|
||||
code, reason = parts
|
||||
reason = reason.strip()
|
||||
|
||||
|
||||
try:
|
||||
code = int(code)
|
||||
except ValueError:
|
||||
raise ValueError("Illegal response status from server "
|
||||
"(%s is non-numeric)." % repr(code))
|
||||
|
||||
|
||||
if code < 100 or code > 599:
|
||||
raise ValueError("Illegal response status from server "
|
||||
"(%s is out of range)." % repr(code))
|
||||
|
||||
|
||||
if code not in response_codes:
|
||||
# code is unknown but not illegal
|
||||
default_reason, message = "", ""
|
||||
else:
|
||||
default_reason, message = response_codes[code]
|
||||
|
||||
|
||||
if reason is None:
|
||||
reason = default_reason
|
||||
|
||||
|
||||
return code, reason, message
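
Illustration only, not part of the diff: what valid_status() returns for a bare code and for a code with a custom reason phrase, assuming cherrypy.lib.httputil is importable.

    from cherrypy.lib import httputil

    code, reason, message = httputil.valid_status(404)
    assert (code, reason) == (404, "Not Found")

    # A custom reason phrase is preserved.
    code, reason, message = httputil.valid_status("503 Catching Breath")
    assert (code, reason) == (503, "Catching Breath")
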
|
||||
|
||||
|
||||
@@ -287,21 +303,21 @@ def valid_status(status):
|
||||
|
||||
def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
|
||||
"""Parse a query given as a string argument.
|
||||
|
||||
|
||||
Arguments:
|
||||
|
||||
|
||||
qs: URL-encoded query string to be parsed
|
||||
|
||||
|
||||
keep_blank_values: flag indicating whether blank values in
|
||||
URL encoded queries should be treated as blank strings. A
|
||||
true value indicates that blanks should be retained as blank
|
||||
strings. The default false value indicates that blank values
|
||||
are to be ignored and treated as if they were not included.
|
||||
|
||||
|
||||
strict_parsing: flag indicating what to do with parsing errors. If
|
||||
false (the default), errors are silently ignored. If true,
|
||||
errors raise a ValueError exception.
|
||||
|
||||
|
||||
Returns a dict, as G-d intended.
|
||||
"""
|
||||
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
|
||||
@@ -332,9 +348,10 @@ def _parse_qs(qs, keep_blank_values=0, strict_parsing=0, encoding='utf-8'):
|
||||
|
||||
image_map_pattern = re.compile(r"[0-9]+,[0-9]+")
|
||||
|
||||
|
||||
def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
|
||||
"""Build a params dictionary from a query_string.
|
||||
|
||||
|
||||
Duplicate key/value pairs in the provided query_string will be
|
||||
returned as {'key': [val1, val2, ...]}. Single key/values will
|
||||
be returned as strings: {'key': 'value'}.
|
||||
@@ -350,41 +367,42 @@ def parse_query_string(query_string, keep_blank_values=True, encoding='utf-8'):
|
||||
|
||||
|
||||
class CaseInsensitiveDict(dict):
|
||||
|
||||
"""A case-insensitive dict subclass.
|
||||
|
||||
|
||||
Each key is changed on entry to str(key).title().
|
||||
"""
|
||||
|
||||
|
||||
def __getitem__(self, key):
|
||||
return dict.__getitem__(self, str(key).title())
|
||||
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
dict.__setitem__(self, str(key).title(), value)
|
||||
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, str(key).title())
|
||||
|
||||
|
||||
def __contains__(self, key):
|
||||
return dict.__contains__(self, str(key).title())
|
||||
|
||||
|
||||
def get(self, key, default=None):
|
||||
return dict.get(self, str(key).title(), default)
|
||||
|
||||
|
||||
if hasattr({}, 'has_key'):
|
||||
def has_key(self, key):
|
||||
return dict.has_key(self, str(key).title())
|
||||
|
||||
return str(key).title() in self
|
||||
|
||||
def update(self, E):
|
||||
for k in E.keys():
|
||||
self[str(k).title()] = E[k]
|
||||
|
||||
|
||||
def fromkeys(cls, seq, value=None):
|
||||
newdict = cls()
|
||||
for k in seq:
|
||||
newdict[str(k).title()] = value
|
||||
return newdict
|
||||
fromkeys = classmethod(fromkeys)
|
||||
|
||||
|
||||
def setdefault(self, key, x=None):
|
||||
key = str(key).title()
|
||||
try:
|
||||
@@ -392,7 +410,7 @@ class CaseInsensitiveDict(dict):
|
||||
except KeyError:
|
||||
self[key] = x
|
||||
return x
|
||||
|
||||
|
||||
def pop(self, key, default):
|
||||
return dict.pop(self, str(key).title(), default)
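
Usage sketch, not part of the diff: because every key is normalized with str(key).title(), lookups in this class are case-insensitive.

    from cherrypy.lib.httputil import CaseInsensitiveDict

    d = CaseInsensitiveDict()
    d["content-type"] = "text/html"
    assert d["CONTENT-TYPE"] == "text/html"
    assert "Content-Type" in d
    assert d.pop("content-TYPE", None) == "text/html"
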
|
||||
|
||||
@@ -404,103 +422,115 @@ class CaseInsensitiveDict(dict):
|
||||
# replaced with a single SP before interpretation of the TEXT value."
|
||||
if nativestr == bytestr:
|
||||
header_translate_table = ''.join([chr(i) for i in xrange(256)])
|
||||
header_translate_deletechars = ''.join([chr(i) for i in xrange(32)]) + chr(127)
|
||||
header_translate_deletechars = ''.join(
|
||||
[chr(i) for i in xrange(32)]) + chr(127)
|
||||
else:
|
||||
header_translate_table = None
|
||||
header_translate_deletechars = bytes(range(32)) + bytes([127])
|
||||
|
||||
|
||||
class HeaderMap(CaseInsensitiveDict):
|
||||
|
||||
"""A dict subclass for HTTP request and response headers.
|
||||
|
||||
|
||||
Each key is changed on entry to str(key).title(). This allows headers
|
||||
to be case-insensitive and avoid duplicates.
|
||||
|
||||
|
||||
Values are header values (decoded according to :rfc:`2047` if necessary).
|
||||
"""
|
||||
|
||||
protocol=(1, 1)
|
||||
|
||||
protocol = (1, 1)
|
||||
encodings = ["ISO-8859-1"]
|
||||
|
||||
|
||||
# Someday, when http-bis is done, this will probably get dropped
|
||||
# since few servers, clients, or intermediaries do it. But until then,
|
||||
# we're going to obey the spec as is.
|
||||
# "Words of *TEXT MAY contain characters from character sets other than
|
||||
# ISO-8859-1 only when encoded according to the rules of RFC 2047."
|
||||
use_rfc_2047 = True
|
||||
|
||||
|
||||
def elements(self, key):
|
||||
"""Return a sorted list of HeaderElements for the given header."""
|
||||
key = str(key).title()
|
||||
value = self.get(key)
|
||||
return header_elements(key, value)
|
||||
|
||||
|
||||
def values(self, key):
|
||||
"""Return a sorted list of HeaderElement.value for the given header."""
|
||||
return [e.value for e in self.elements(key)]
|
||||
|
||||
|
||||
def output(self):
|
||||
"""Transform self into a list of (name, value) tuples."""
|
||||
header_list = []
|
||||
for k, v in self.items():
|
||||
return list(self.encode_header_items(self.items()))
|
||||
|
||||
def encode_header_items(cls, header_items):
|
||||
"""
|
||||
Prepare the sequence of name, value tuples into a form suitable for
|
||||
transmitting on the wire for HTTP.
|
||||
"""
|
||||
for k, v in header_items:
|
||||
if isinstance(k, unicodestr):
|
||||
k = self.encode(k)
|
||||
|
||||
k = cls.encode(k)
|
||||
|
||||
if not isinstance(v, basestring):
|
||||
v = str(v)
|
||||
|
||||
|
||||
if isinstance(v, unicodestr):
|
||||
v = self.encode(v)
|
||||
|
||||
v = cls.encode(v)
|
||||
|
||||
# See header_translate_* constants above.
|
||||
# Replace only if you really know what you're doing.
|
||||
k = k.translate(header_translate_table, header_translate_deletechars)
|
||||
v = v.translate(header_translate_table, header_translate_deletechars)
|
||||
|
||||
header_list.append((k, v))
|
||||
return header_list
|
||||
|
||||
def encode(self, v):
|
||||
k = k.translate(header_translate_table,
|
||||
header_translate_deletechars)
|
||||
v = v.translate(header_translate_table,
|
||||
header_translate_deletechars)
|
||||
|
||||
yield (k, v)
|
||||
encode_header_items = classmethod(encode_header_items)
|
||||
|
||||
def encode(cls, v):
|
||||
"""Return the given header name or value, encoded for HTTP output."""
|
||||
for enc in self.encodings:
|
||||
for enc in cls.encodings:
|
||||
try:
|
||||
return v.encode(enc)
|
||||
except UnicodeEncodeError:
|
||||
continue
|
||||
|
||||
if self.protocol == (1, 1) and self.use_rfc_2047:
|
||||
# Encode RFC-2047 TEXT
|
||||
# (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
|
||||
|
||||
if cls.protocol == (1, 1) and cls.use_rfc_2047:
|
||||
# Encode RFC-2047 TEXT
|
||||
# (e.g. u"\u8200" -> "=?utf-8?b?6IiA?=").
|
||||
# We do our own here instead of using the email module
|
||||
# because we never want to fold lines--folding has
|
||||
# been deprecated by the HTTP working group.
|
||||
v = b2a_base64(v.encode('utf-8'))
|
||||
return (ntob('=?utf-8?b?') + v.strip(ntob('\n')) + ntob('?='))
|
||||
|
||||
|
||||
raise ValueError("Could not encode header part %r using "
|
||||
"any of the encodings %r." %
|
||||
(v, self.encodings))
|
||||
(v, cls.encodings))
|
||||
encode = classmethod(encode)
|
||||
|
||||
|
||||
class Host(object):
|
||||
|
||||
"""An internet address.
|
||||
|
||||
|
||||
name
|
||||
Should be the client's host name. If not available (because no DNS
|
||||
lookup is performed), the IP address should be used instead.
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
ip = "0.0.0.0"
|
||||
port = 80
|
||||
name = "unknown.tld"
|
||||
|
||||
|
||||
def __init__(self, ip, port, name=None):
|
||||
self.ip = ip
|
||||
self.port = port
|
||||
if name is None:
|
||||
name = ip
|
||||
self.name = name
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
return "httputil.Host(%r, %r, %r)" % (self.ip, self.port, self.name)
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
import sys
|
||||
import cherrypy
|
||||
from cherrypy._cpcompat import basestring, ntou, json, json_encode, json_decode
|
||||
from cherrypy._cpcompat import basestring, ntou, json_encode, json_decode
|
||||
|
||||
|
||||
def json_processor(entity):
|
||||
"""Read application/json data into request.json."""
|
||||
if not entity.headers.get(ntou("Content-Length"), ntou("")):
|
||||
raise cherrypy.HTTPError(411)
|
||||
|
||||
|
||||
body = entity.fp.read()
|
||||
try:
|
||||
cherrypy.serving.request.json = json_decode(body.decode('utf-8'))
|
||||
except ValueError:
|
||||
raise cherrypy.HTTPError(400, 'Invalid JSON document')
|
||||
|
||||
|
||||
def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
|
||||
force=True, debug=False, processor = json_processor):
|
||||
force=True, debug=False, processor=json_processor):
|
||||
"""Add a processor to parse JSON request entities:
|
||||
The default processor places the parsed data into request.json.
|
||||
|
||||
@@ -22,11 +23,11 @@ def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
|
||||
be deserialized from JSON to the Python equivalent, and the result
|
||||
stored at cherrypy.request.json. The 'content_type' argument may
|
||||
be a Content-Type string or a list of allowable Content-Type strings.
|
||||
|
||||
|
||||
If the 'force' argument is True (the default), then entities of other
|
||||
content types will not be allowed; "415 Unsupported Media Type" is
|
||||
raised instead.
|
||||
|
||||
|
||||
Supply your own processor to use a custom decoder, or to handle the parsed
|
||||
data differently. The processor can be configured via
|
||||
tools.json_in.processor or via the decorator method.
|
||||
@@ -35,14 +36,14 @@ def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
|
||||
request header, or it will raise "411 Length Required". If for any
|
||||
other reason the request entity cannot be deserialized from JSON,
|
||||
it will raise "400 Bad Request: Invalid JSON document".
|
||||
|
||||
|
||||
You must be using Python 2.6 or greater, or have the 'simplejson'
|
||||
package importable; otherwise, ValueError is raised during processing.
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
if isinstance(content_type, basestring):
|
||||
content_type = [content_type]
|
||||
|
||||
|
||||
if force:
|
||||
if debug:
|
||||
cherrypy.log('Removing body processors %s' %
|
||||
@@ -51,19 +52,22 @@ def json_in(content_type=[ntou('application/json'), ntou('text/javascript')],
|
||||
request.body.default_proc = cherrypy.HTTPError(
|
||||
415, 'Expected an entity of content type %s' %
|
||||
', '.join(content_type))
|
||||
|
||||
|
||||
for ct in content_type:
|
||||
if debug:
|
||||
cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN')
|
||||
request.body.processors[ct] = processor
|
||||
|
||||
|
||||
def json_handler(*args, **kwargs):
|
||||
value = cherrypy.serving.request._json_inner_handler(*args, **kwargs)
|
||||
return json_encode(value)
|
||||
|
||||
def json_out(content_type='application/json', debug=False, handler=json_handler):
|
||||
|
||||
def json_out(content_type='application/json', debug=False,
|
||||
handler=json_handler):
|
||||
"""Wrap request.handler to serialize its output to JSON. Sets Content-Type.
|
||||
|
||||
|
||||
If the given content_type is None, the Content-Type response header
|
||||
is not set.
|
||||
|
||||
@@ -75,6 +79,11 @@ def json_out(content_type='application/json', debug=False, handler=json_handler)
|
||||
package importable; otherwise, ValueError is raised during processing.
|
||||
"""
|
||||
request = cherrypy.serving.request
|
||||
# request.handler may be set to None by e.g. the caching tool
|
||||
# to signal to all components that a response body has already
|
||||
# been attached, in which case we don't need to wrap anything.
|
||||
if request.handler is None:
|
||||
return
|
||||
if debug:
|
||||
cherrypy.log('Replacing %s with JSON handler' % request.handler,
|
||||
'TOOLS.JSON_OUT')
|
||||
@@ -82,6 +91,6 @@ def json_out(content_type='application/json', debug=False, handler=json_handler)
|
||||
request.handler = handler
|
||||
if content_type is not None:
|
||||
if debug:
|
||||
cherrypy.log('Setting Content-Type to %s' % content_type, 'TOOLS.JSON_OUT')
|
||||
cherrypy.log('Setting Content-Type to %s' %
|
||||
content_type, 'TOOLS.JSON_OUT')
|
||||
cherrypy.serving.response.headers['Content-Type'] = content_type
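
A minimal sketch, not part of the diff, of wiring the two tools together on a handler; it assumes the standard tool names tools.json_in and tools.json_out that CherryPy registers for these functions.

    import cherrypy

    class Echo(object):
        @cherrypy.expose
        @cherrypy.tools.json_in()
        @cherrypy.tools.json_out()
        def default(self, *args, **kwargs):
            # The parsed request body is available as cherrypy.request.json
            # and the returned structure is serialized back by json_handler.
            return {"received": cherrypy.request.json}

    # cherrypy.quickstart(Echo())
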
|
||||
|
||||
|
||||
libs/cherrypy/lib/lockfile.py (new file, 147 lines)
@@ -0,0 +1,147 @@
|
||||
"""
|
||||
Platform-independent file locking. Inspired by and modeled after zc.lockfile.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
try:
|
||||
import msvcrt
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
try:
|
||||
import fcntl
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
class LockError(Exception):
|
||||
|
||||
"Could not obtain a lock"
|
||||
|
||||
msg = "Unable to lock %r"
|
||||
|
||||
def __init__(self, path):
|
||||
super(LockError, self).__init__(self.msg % path)
|
||||
|
||||
|
||||
class UnlockError(LockError):
|
||||
|
||||
"Could not release a lock"
|
||||
|
||||
msg = "Unable to unlock %r"
|
||||
|
||||
|
||||
# first, a default, naive locking implementation
|
||||
class LockFile(object):
|
||||
|
||||
"""
|
||||
A default, naive locking implementation. Always fails if the file
|
||||
already exists.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
try:
|
||||
fd = os.open(path, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
|
||||
except OSError:
|
||||
raise LockError(self.path)
|
||||
os.close(fd)
|
||||
|
||||
def release(self):
|
||||
os.remove(self.path)
|
||||
|
||||
def remove(self):
|
||||
pass
|
||||
|
||||
|
||||
class SystemLockFile(object):
|
||||
|
||||
"""
|
||||
An abstract base class for platform-specific locking.
|
||||
"""
|
||||
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
|
||||
try:
|
||||
# Open lockfile for writing without truncation:
|
||||
self.fp = open(path, 'r+')
|
||||
except IOError:
|
||||
# If the file doesn't exist, IOError is raised; Use a+ instead.
|
||||
# Note that there may be a race here. Multiple processes
|
||||
# could fail on the r+ open and open the file a+, but only
|
||||
# one will get the the lock and write a pid.
|
||||
self.fp = open(path, 'a+')
|
||||
|
||||
try:
|
||||
self._lock_file()
|
||||
except:
|
||||
self.fp.seek(1)
|
||||
self.fp.close()
|
||||
del self.fp
|
||||
raise
|
||||
|
||||
self.fp.write(" %s\n" % os.getpid())
|
||||
self.fp.truncate()
|
||||
self.fp.flush()
|
||||
|
||||
def release(self):
|
||||
if not hasattr(self, 'fp'):
|
||||
return
|
||||
self._unlock_file()
|
||||
self.fp.close()
|
||||
del self.fp
|
||||
|
||||
def remove(self):
|
||||
"""
|
||||
Attempt to remove the file
|
||||
"""
|
||||
try:
|
||||
os.remove(self.path)
|
||||
except:
|
||||
pass
|
||||
|
||||
#@abc.abstract_method
|
||||
# def _lock_file(self):
|
||||
# """Attempt to obtain the lock on self.fp. Raise LockError if not
|
||||
# acquired."""
|
||||
|
||||
def _unlock_file(self):
|
||||
"""Attempt to obtain the lock on self.fp. Raise UnlockError if not
|
||||
released."""
|
||||
|
||||
|
||||
class WindowsLockFile(SystemLockFile):
|
||||
|
||||
def _lock_file(self):
|
||||
# Lock just the first byte
|
||||
try:
|
||||
msvcrt.locking(self.fp.fileno(), msvcrt.LK_NBLCK, 1)
|
||||
except IOError:
|
||||
raise LockError(self.fp.name)
|
||||
|
||||
def _unlock_file(self):
|
||||
try:
|
||||
self.fp.seek(0)
|
||||
msvcrt.locking(self.fp.fileno(), msvcrt.LK_UNLCK, 1)
|
||||
except IOError:
|
||||
raise UnlockError(self.fp.name)
|
||||
|
||||
if 'msvcrt' in globals():
|
||||
LockFile = WindowsLockFile
|
||||
|
||||
|
||||
class UnixLockFile(SystemLockFile):
|
||||
|
||||
def _lock_file(self):
|
||||
flags = fcntl.LOCK_EX | fcntl.LOCK_NB
|
||||
try:
|
||||
fcntl.flock(self.fp.fileno(), flags)
|
||||
except IOError:
|
||||
raise LockError(self.fp.name)
|
||||
|
||||
# no need to implement _unlock_file, it will be unlocked on close()
|
||||
|
||||
if 'fcntl' in globals():
|
||||
LockFile = UnixLockFile
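
Usage sketch, not part of the new module: acquire and release a lock with whichever LockFile class was selected above. The path is hypothetical.

    from cherrypy.lib import lockfile

    lock = lockfile.LockFile("/tmp/session-42.lock")
    try:
        pass  # the resource is held exclusively while the lock exists
    finally:
        lock.release()
        lock.remove()
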
|
||||
libs/cherrypy/lib/locking.py (new file, 47 lines)
@@ -0,0 +1,47 @@
import datetime


class NeverExpires(object):
    def expired(self):
        return False


class Timer(object):
    """
    A simple timer that will indicate when an expiration time has passed.
    """
    def __init__(self, expiration):
        "Create a timer that expires at `expiration` (UTC datetime)"
        self.expiration = expiration

    @classmethod
    def after(cls, elapsed):
        """
        Return a timer that will expire after `elapsed` passes.
        """
        return cls(datetime.datetime.utcnow() + elapsed)

    def expired(self):
        return datetime.datetime.utcnow() >= self.expiration


class LockTimeout(Exception):
    "An exception when a lock could not be acquired before a timeout period"


class LockChecker(object):
    """
    Keep track of the time and detect if a timeout has expired
    """
    def __init__(self, session_id, timeout):
        self.session_id = session_id
        if timeout:
            self.timer = Timer.after(timeout)
        else:
            self.timer = NeverExpires()

    def expired(self):
        if self.timer.expired():
            raise LockTimeout(
                "Timeout acquiring lock for %(session_id)s" % vars(self))
        return False
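
Usage sketch, not part of the new module: a LockChecker built from a timedelta raises LockTimeout once the timeout has elapsed.

    import datetime
    import time
    from cherrypy.lib import locking

    checker = locking.LockChecker("session-42",
                                  timeout=datetime.timedelta(seconds=0.5))
    assert checker.expired() is False
    time.sleep(0.6)
    try:
        checker.expired()
    except locking.LockTimeout:
        print("gave up waiting for the session lock")
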
|
||||
@@ -6,17 +6,17 @@ CherryPy users
|
||||
You can profile any of your pages as follows::
|
||||
|
||||
from cherrypy.lib import profiler
|
||||
|
||||
|
||||
class Root:
|
||||
p = profile.Profiler("/path/to/profile/dir")
|
||||
|
||||
|
||||
def index(self):
|
||||
self.p.run(self._index)
|
||||
index.exposed = True
|
||||
|
||||
|
||||
def _index(self):
|
||||
return "Hello, world!"
|
||||
|
||||
|
||||
cherrypy.tree.mount(Root())
|
||||
|
||||
You can also turn on profiling for all requests
|
||||
@@ -35,7 +35,8 @@ module from the command line, it will call ``serve()`` for you.
|
||||
|
||||
|
||||
def new_func_strip_path(func_name):
|
||||
"""Make profiler output more readable by adding ``__init__`` modules' parents"""
|
||||
"""Make profiler output more readable by adding `__init__` modules' parents
|
||||
"""
|
||||
filename, line, name = func_name
|
||||
if filename.endswith("__init__.py"):
|
||||
return os.path.basename(filename[:-12]) + filename[-12:], line, name
|
||||
@@ -49,23 +50,25 @@ except ImportError:
|
||||
profile = None
|
||||
pstats = None
|
||||
|
||||
import os, os.path
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
from cherrypy._cpcompat import BytesIO
|
||||
from cherrypy._cpcompat import StringIO
|
||||
|
||||
_count = 0
|
||||
|
||||
|
||||
class Profiler(object):
|
||||
|
||||
|
||||
def __init__(self, path=None):
|
||||
if not path:
|
||||
path = os.path.join(os.path.dirname(__file__), "profile")
|
||||
self.path = path
|
||||
if not os.path.exists(path):
|
||||
os.makedirs(path)
|
||||
|
||||
|
||||
def run(self, func, *args, **params):
|
||||
"""Dump profile data into self.path."""
|
||||
global _count
|
||||
@@ -75,17 +78,17 @@ class Profiler(object):
|
||||
result = prof.runcall(func, *args, **params)
|
||||
prof.dump_stats(path)
|
||||
return result
|
||||
|
||||
|
||||
def statfiles(self):
|
||||
""":rtype: list of available profiles.
|
||||
"""
|
||||
return [f for f in os.listdir(self.path)
|
||||
if f.startswith("cp_") and f.endswith(".prof")]
|
||||
|
||||
|
||||
def stats(self, filename, sortby='cumulative'):
|
||||
""":rtype stats(index): output of print_stats() for the given profile.
|
||||
"""
|
||||
sio = BytesIO()
|
||||
sio = StringIO()
|
||||
if sys.version_info >= (2, 5):
|
||||
s = pstats.Stats(os.path.join(self.path, filename), stream=sio)
|
||||
s.strip_dirs()
|
||||
@@ -106,7 +109,7 @@ class Profiler(object):
|
||||
response = sio.getvalue()
|
||||
sio.close()
|
||||
return response
|
||||
|
||||
|
||||
def index(self):
|
||||
return """<html>
|
||||
<head><title>CherryPy profile data</title></head>
|
||||
@@ -117,16 +120,17 @@ class Profiler(object):
|
||||
</html>
|
||||
"""
|
||||
index.exposed = True
|
||||
|
||||
|
||||
def menu(self):
|
||||
yield "<h2>Profiling runs</h2>"
|
||||
yield "<p>Click on one of the runs below to see profiling data.</p>"
|
||||
runs = self.statfiles()
|
||||
runs.sort()
|
||||
for i in runs:
|
||||
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (i, i)
|
||||
yield "<a href='report?filename=%s' target='main'>%s</a><br />" % (
|
||||
i, i)
|
||||
menu.exposed = True
|
||||
|
||||
|
||||
def report(self, filename):
|
||||
import cherrypy
|
||||
cherrypy.response.headers['Content-Type'] = 'text/plain'
|
||||
@@ -135,50 +139,53 @@ class Profiler(object):
|
||||
|
||||
|
||||
class ProfileAggregator(Profiler):
|
||||
|
||||
|
||||
def __init__(self, path=None):
|
||||
Profiler.__init__(self, path)
|
||||
global _count
|
||||
self.count = _count = _count + 1
|
||||
self.profiler = profile.Profile()
|
||||
|
||||
def run(self, func, *args):
|
||||
|
||||
def run(self, func, *args, **params):
|
||||
path = os.path.join(self.path, "cp_%04d.prof" % self.count)
|
||||
result = self.profiler.runcall(func, *args)
|
||||
result = self.profiler.runcall(func, *args, **params)
|
||||
self.profiler.dump_stats(path)
|
||||
return result
|
||||
|
||||
|
||||
class make_app:
|
||||
|
||||
def __init__(self, nextapp, path=None, aggregate=False):
|
||||
"""Make a WSGI middleware app which wraps 'nextapp' with profiling.
|
||||
|
||||
|
||||
nextapp
|
||||
the WSGI application to wrap, usually an instance of
|
||||
cherrypy.Application.
|
||||
|
||||
|
||||
path
|
||||
where to dump the profiling output.
|
||||
|
||||
|
||||
aggregate
|
||||
if True, profile data for all HTTP requests will go in
|
||||
a single file. If False (the default), each HTTP request will
|
||||
dump its profile data into a separate file.
|
||||
|
||||
|
||||
"""
|
||||
if profile is None or pstats is None:
|
||||
msg = ("Your installation of Python does not have a profile module. "
|
||||
"If you're on Debian, try `sudo apt-get install python-profiler`. "
|
||||
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
|
||||
msg = ("Your installation of Python does not have a profile "
|
||||
"module. If you're on Debian, try "
|
||||
"`sudo apt-get install python-profiler`. "
|
||||
"See http://www.cherrypy.org/wiki/ProfilingOnDebian "
|
||||
"for details.")
|
||||
warnings.warn(msg)
|
||||
|
||||
|
||||
self.nextapp = nextapp
|
||||
self.aggregate = aggregate
|
||||
if aggregate:
|
||||
self.profiler = ProfileAggregator(path)
|
||||
else:
|
||||
self.profiler = Profiler(path)
|
||||
|
||||
|
||||
def __call__(self, environ, start_response):
|
||||
def gather():
|
||||
result = []
|
||||
@@ -191,10 +198,12 @@ class make_app:
|
||||
def serve(path=None, port=8080):
|
||||
if profile is None or pstats is None:
|
||||
msg = ("Your installation of Python does not have a profile module. "
|
||||
"If you're on Debian, try `sudo apt-get install python-profiler`. "
|
||||
"See http://www.cherrypy.org/wiki/ProfilingOnDebian for details.")
|
||||
"If you're on Debian, try "
|
||||
"`sudo apt-get install python-profiler`. "
|
||||
"See http://www.cherrypy.org/wiki/ProfilingOnDebian "
|
||||
"for details.")
|
||||
warnings.warn(msg)
|
||||
|
||||
|
||||
import cherrypy
|
||||
cherrypy.config.update({'server.socket_port': int(port),
|
||||
'server.thread_pool': 10,
|
||||
@@ -205,4 +214,3 @@ def serve(path=None, port=8080):
|
||||
|
||||
if __name__ == "__main__":
|
||||
serve(*tuple(sys.argv[1:]))
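
Sketch only, not part of the diff: profiling every request of an application by wrapping it with the make_app middleware defined above. The mount point and dump directory are hypothetical.

    import cherrypy
    from cherrypy.lib import profiler

    class Root(object):
        @cherrypy.expose
        def index(self):
            return "Hello, world!"

    app = cherrypy.Application(Root(), "/")
    wrapped = profiler.make_app(app, path="/tmp/profile", aggregate=False)
    # cherrypy.tree.graft(wrapped, "/")
    # cherrypy.engine.start(); cherrypy.engine.block()
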
|
||||
|
||||
|
||||
@@ -44,6 +44,7 @@ except ImportError:
|
||||
import operator as _operator
|
||||
import sys
|
||||
|
||||
|
||||
def as_dict(config):
|
||||
"""Return a dict from 'config' whether it is a dict, file, or filename."""
|
||||
if isinstance(config, basestring):
|
||||
@@ -54,26 +55,27 @@ def as_dict(config):
|
||||
|
||||
|
||||
class NamespaceSet(dict):
|
||||
|
||||
"""A dict of config namespace names and handlers.
|
||||
|
||||
|
||||
Each config entry should begin with a namespace name; the corresponding
|
||||
namespace handler will be called once for each config entry in that
|
||||
namespace, and will be passed two arguments: the config key (with the
|
||||
namespace removed) and the config value.
|
||||
|
||||
|
||||
Namespace handlers may be any Python callable; they may also be
|
||||
Python 2.5-style 'context managers', in which case their __enter__
|
||||
method should return a callable to be used as the handler.
|
||||
See cherrypy.tools (the Toolbox class) for an example.
|
||||
"""
|
||||
|
||||
|
||||
def __call__(self, config):
|
||||
"""Iterate through config and pass it to each namespace handler.
|
||||
|
||||
|
||||
config
|
||||
A flat dict, where keys use dots to separate
|
||||
namespaces, and values are arbitrary.
|
||||
|
||||
|
||||
The first name in each config key is used to look up the corresponding
|
||||
namespace handler. For example, a config entry of {'tools.gzip.on': v}
|
||||
will call the 'tools' namespace handler with the args: ('gzip.on', v)
|
||||
@@ -85,7 +87,7 @@ class NamespaceSet(dict):
|
||||
ns, name = k.split(".", 1)
|
||||
bucket = ns_confs.setdefault(ns, {})
|
||||
bucket[name] = config[k]
|
||||
|
||||
|
||||
# I chose __enter__ and __exit__ so someday this could be
|
||||
# rewritten using Python 2.5's 'with' statement:
|
||||
# for ns, handler in self.iteritems():
|
||||
@@ -116,11 +118,11 @@ class NamespaceSet(dict):
|
||||
else:
|
||||
for k, v in ns_confs.get(ns, {}).items():
|
||||
handler(k, v)
|
||||
|
||||
|
||||
def __repr__(self):
|
||||
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
|
||||
dict.__repr__(self))
|
||||
|
||||
|
||||
def __copy__(self):
|
||||
newobj = self.__class__()
|
||||
newobj.update(self)
|
||||
@@ -129,27 +131,28 @@ class NamespaceSet(dict):
|
||||
|
||||
|
||||
class Config(dict):
|
||||
|
||||
"""A dict-like set of configuration data, with defaults and namespaces.
|
||||
|
||||
|
||||
May take a file, filename, or dict.
|
||||
"""
|
||||
|
||||
|
||||
defaults = {}
|
||||
environments = {}
|
||||
namespaces = NamespaceSet()
|
||||
|
||||
|
||||
def __init__(self, file=None, **kwargs):
|
||||
self.reset()
|
||||
if file is not None:
|
||||
self.update(file)
|
||||
if kwargs:
|
||||
self.update(kwargs)
|
||||
|
||||
|
||||
def reset(self):
|
||||
"""Reset self to default values."""
|
||||
self.clear()
|
||||
dict.update(self, self.defaults)
|
||||
|
||||
|
||||
def update(self, config):
|
||||
"""Update self from a dict, file or filename."""
|
||||
if isinstance(config, basestring):
|
||||
@@ -161,7 +164,7 @@ class Config(dict):
|
||||
else:
|
||||
config = config.copy()
|
||||
self._apply(config)
|
||||
|
||||
|
||||
def _apply(self, config):
|
||||
"""Update self from a dict."""
|
||||
which_env = config.get('environment')
|
||||
@@ -170,23 +173,24 @@ class Config(dict):
|
||||
for k in env:
|
||||
if k not in config:
|
||||
config[k] = env[k]
|
||||
|
||||
|
||||
dict.update(self, config)
|
||||
self.namespaces(config)
|
||||
|
||||
|
||||
def __setitem__(self, k, v):
|
||||
dict.__setitem__(self, k, v)
|
||||
self.namespaces({k: v})
|
||||
|
||||
|
||||
class Parser(ConfigParser):
|
||||
"""Sub-class of ConfigParser that keeps the case of options and that
|
||||
|
||||
"""Sub-class of ConfigParser that keeps the case of options and that
|
||||
raises an exception if the file cannot be read.
|
||||
"""
|
||||
|
||||
|
||||
def optionxform(self, optionstr):
|
||||
return optionstr
|
||||
|
||||
|
||||
def read(self, filenames):
|
||||
if isinstance(filenames, basestring):
|
||||
filenames = [filenames]
|
||||
@@ -200,7 +204,7 @@ class Parser(ConfigParser):
|
||||
self._read(fp, filename)
|
||||
finally:
|
||||
fp.close()
|
||||
|
||||
|
||||
def as_dict(self, raw=False, vars=None):
|
||||
"""Convert an INI file to a dictionary"""
|
||||
# Load INI file into a dict
|
||||
@@ -220,7 +224,7 @@ class Parser(ConfigParser):
|
||||
raise ValueError(msg, x.__class__.__name__, x.args)
|
||||
result[section][option] = value
|
||||
return result
|
||||
|
||||
|
||||
def dict_from_file(self, file):
|
||||
if hasattr(file, 'read'):
|
||||
self.readfp(file)
|
||||
@@ -233,14 +237,14 @@ class Parser(ConfigParser):
|
||||
|
||||
|
||||
class _Builder2:
|
||||
|
||||
|
||||
def build(self, o):
|
||||
m = getattr(self, 'build_' + o.__class__.__name__, None)
|
||||
if m is None:
|
||||
raise TypeError("unrepr does not recognize %s" %
|
||||
repr(o.__class__.__name__))
|
||||
return m(o)
|
||||
|
||||
|
||||
def astnode(self, s):
|
||||
"""Return a Python2 ast Node compiled from a string."""
|
||||
try:
|
||||
@@ -249,40 +253,58 @@ class _Builder2:
|
||||
# Fallback to eval when compiler package is not available,
|
||||
# e.g. IronPython 1.0.
|
||||
return eval(s)
|
||||
|
||||
|
||||
p = compiler.parse("__tempvalue__ = " + s)
|
||||
return p.getChildren()[1].getChildren()[0].getChildren()[1]
|
||||
|
||||
|
||||
def build_Subscript(self, o):
|
||||
expr, flags, subs = o.getChildren()
|
||||
expr = self.build(expr)
|
||||
subs = self.build(subs)
|
||||
return expr[subs]
|
||||
|
||||
|
||||
def build_CallFunc(self, o):
|
||||
children = map(self.build, o.getChildren())
|
||||
callee = children.pop(0)
|
||||
kwargs = children.pop() or {}
|
||||
starargs = children.pop() or ()
|
||||
args = tuple(children) + tuple(starargs)
|
||||
children = o.getChildren()
|
||||
# Build callee from first child
|
||||
callee = self.build(children[0])
|
||||
# Build args and kwargs from remaining children
|
||||
args = []
|
||||
kwargs = {}
|
||||
for child in children[1:]:
|
||||
class_name = child.__class__.__name__
|
||||
# None is ignored
|
||||
if class_name == 'NoneType':
|
||||
continue
|
||||
# Keywords become kwargs
|
||||
if class_name == 'Keyword':
|
||||
kwargs.update(self.build(child))
|
||||
# Everything else becomes args
|
||||
else :
|
||||
args.append(self.build(child))
|
||||
return callee(*args, **kwargs)
|
||||
|
||||
|
||||
def build_Keyword(self, o):
|
||||
key, value_obj = o.getChildren()
|
||||
value = self.build(value_obj)
|
||||
kw_dict = {key: value}
|
||||
return kw_dict
|
||||
|
||||
def build_List(self, o):
|
||||
return map(self.build, o.getChildren())
|
||||
|
||||
|
||||
def build_Const(self, o):
|
||||
return o.value
|
||||
|
||||
|
||||
def build_Dict(self, o):
|
||||
d = {}
|
||||
i = iter(map(self.build, o.getChildren()))
|
||||
for el in i:
|
||||
d[el] = i.next()
|
||||
return d
|
||||
|
||||
|
||||
def build_Tuple(self, o):
|
||||
return tuple(self.build_List(o))
|
||||
|
||||
|
||||
def build_Name(self, o):
|
||||
name = o.name
|
||||
if name == 'None':
|
||||
@@ -291,21 +313,21 @@ class _Builder2:
|
||||
return True
|
||||
if name == 'False':
|
||||
return False
|
||||
|
||||
|
||||
# See if the Name is a package or module. If it is, import it.
|
||||
try:
|
||||
return modules(name)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# See if the Name is in builtins.
|
||||
try:
|
||||
return getattr(builtins, name)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
raise TypeError("unrepr could not resolve the name %s" % repr(name))
|
||||
|
||||
|
||||
def build_Add(self, o):
|
||||
left, right = map(self.build, o.getChildren())
|
||||
return left + right
|
||||
@@ -313,30 +335,30 @@ class _Builder2:
|
||||
def build_Mul(self, o):
|
||||
left, right = map(self.build, o.getChildren())
|
||||
return left * right
|
||||
|
||||
|
||||
def build_Getattr(self, o):
|
||||
parent = self.build(o.expr)
|
||||
return getattr(parent, o.attrname)
|
||||
|
||||
|
||||
def build_NoneType(self, o):
|
||||
return None
|
||||
|
||||
|
||||
def build_UnarySub(self, o):
|
||||
return -self.build(o.getChildren()[0])
|
||||
|
||||
|
||||
def build_UnaryAdd(self, o):
|
||||
return self.build(o.getChildren()[0])
|
||||
|
||||
|
||||
class _Builder3:
|
||||
|
||||
|
||||
def build(self, o):
|
||||
m = getattr(self, 'build_' + o.__class__.__name__, None)
|
||||
if m is None:
|
||||
raise TypeError("unrepr does not recognize %s" %
|
||||
repr(o.__class__.__name__))
|
||||
return m(o)
|
||||
|
||||
|
||||
def astnode(self, s):
|
||||
"""Return a Python3 ast Node compiled from a string."""
|
||||
try:
|
||||
@@ -351,46 +373,46 @@ class _Builder3:
|
||||
|
||||
def build_Subscript(self, o):
|
||||
return self.build(o.value)[self.build(o.slice)]
|
||||
|
||||
|
||||
def build_Index(self, o):
|
||||
return self.build(o.value)
|
||||
|
||||
|
||||
def build_Call(self, o):
|
||||
callee = self.build(o.func)
|
||||
|
||||
|
||||
if o.args is None:
|
||||
args = ()
|
||||
else:
|
||||
args = tuple([self.build(a) for a in o.args])
|
||||
|
||||
else:
|
||||
args = tuple([self.build(a) for a in o.args])
|
||||
|
||||
if o.starargs is None:
|
||||
starargs = ()
|
||||
else:
|
||||
starargs = self.build(o.starargs)
|
||||
|
||||
|
||||
if o.kwargs is None:
|
||||
kwargs = {}
|
||||
else:
|
||||
kwargs = self.build(o.kwargs)
|
||||
|
||||
|
||||
return callee(*(args + starargs), **kwargs)
|
||||
|
||||
|
||||
def build_List(self, o):
|
||||
return list(map(self.build, o.elts))
|
||||
|
||||
|
||||
def build_Str(self, o):
|
||||
return o.s
|
||||
|
||||
|
||||
def build_Num(self, o):
|
||||
return o.n
|
||||
|
||||
|
||||
def build_Dict(self, o):
|
||||
return dict([(self.build(k), self.build(v))
|
||||
for k, v in zip(o.keys, o.values)])
|
||||
|
||||
|
||||
def build_Tuple(self, o):
|
||||
return tuple(self.build_List(o))
|
||||
|
||||
|
||||
def build_Name(self, o):
|
||||
name = o.id
|
||||
if name == 'None':
|
||||
@@ -399,28 +421,31 @@ class _Builder3:
|
||||
return True
|
||||
if name == 'False':
|
||||
return False
|
||||
|
||||
|
||||
# See if the Name is a package or module. If it is, import it.
|
||||
try:
|
||||
return modules(name)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
# See if the Name is in builtins.
|
||||
try:
|
||||
import builtins
|
||||
return getattr(builtins, name)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
|
||||
raise TypeError("unrepr could not resolve the name %s" % repr(name))
|
||||
|
||||
|
||||
def build_NameConstant(self, o):
|
||||
return o.value
|
||||
|
||||
def build_UnaryOp(self, o):
|
||||
op, operand = map(self.build, [o.op, o.operand])
|
||||
return op(operand)
|
||||
|
||||
|
||||
def build_BinOp(self, o):
|
||||
left, op, right = map(self.build, [o.left, o.op, o.right])
|
||||
left, op, right = map(self.build, [o.left, o.op, o.right])
|
||||
return op(left, right)
|
||||
|
||||
def build_Add(self, o):
|
||||
@@ -428,7 +453,7 @@ class _Builder3:
|
||||
|
||||
def build_Mult(self, o):
|
||||
return _operator.mul
|
||||
|
||||
|
||||
def build_USub(self, o):
|
||||
return _operator.neg
|
||||
|
||||
@@ -454,23 +479,18 @@ def unrepr(s):
|
||||
|
||||
def modules(modulePath):
|
||||
"""Load a module and retrieve a reference to that module."""
|
||||
try:
|
||||
mod = sys.modules[modulePath]
|
||||
if mod is None:
|
||||
raise KeyError()
|
||||
except KeyError:
|
||||
# The last [''] is important.
|
||||
mod = __import__(modulePath, globals(), locals(), [''])
|
||||
return mod
|
||||
__import__(modulePath)
|
||||
return sys.modules[modulePath]
|
||||
|
||||
|
||||
def attributes(full_attribute_name):
|
||||
"""Load a module and retrieve an attribute of that module."""
|
||||
|
||||
|
||||
# Parse out the path, module, and attribute
|
||||
last_dot = full_attribute_name.rfind(".")
|
||||
attr_name = full_attribute_name[last_dot + 1:]
|
||||
mod_path = full_attribute_name[:last_dot]
|
||||
|
||||
|
||||
mod = modules(mod_path)
|
||||
# Let an AttributeError propagate outward.
|
||||
try:
|
||||
@@ -478,8 +498,6 @@ def attributes(full_attribute_name):
|
||||
except AttributeError:
|
||||
raise AttributeError("'%s' object has no attribute '%s'"
|
||||
% (mod_path, attr_name))
|
||||
|
||||
|
||||
# Return a reference to the attribute.
|
||||
return attr
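
Not part of the diff: how modules() and attributes() are typically exercised (unrepr() calls them when a config value names a module or callable), assuming they are importable from cherrypy.lib.reprconf.

    from cherrypy.lib.reprconf import attributes, modules

    os_path = modules("os.path")       # import (or fetch) a module by dotted path
    join = attributes("os.path.join")  # resolve an attribute by full dotted name
    assert join is os_path.join
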
|
||||
|
||||
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,40 +1,41 @@
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import mimetypes
|
||||
|
||||
try:
|
||||
from io import UnsupportedOperation
|
||||
except ImportError:
|
||||
UnsupportedOperation = object()
|
||||
import logging
|
||||
import mimetypes
|
||||
mimetypes.init()
|
||||
mimetypes.types_map['.dwg']='image/x-dwg'
|
||||
mimetypes.types_map['.ico']='image/x-icon'
|
||||
mimetypes.types_map['.bz2']='application/x-bzip2'
|
||||
mimetypes.types_map['.gz']='application/x-gzip'
|
||||
|
||||
import os
|
||||
import re
|
||||
import stat
|
||||
import time
|
||||
|
||||
import cherrypy
|
||||
from cherrypy._cpcompat import ntob, unquote
|
||||
from cherrypy.lib import cptools, httputil, file_generator_limited
|
||||
|
||||
|
||||
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
|
||||
mimetypes.init()
|
||||
mimetypes.types_map['.dwg'] = 'image/x-dwg'
|
||||
mimetypes.types_map['.ico'] = 'image/x-icon'
|
||||
mimetypes.types_map['.bz2'] = 'application/x-bzip2'
|
||||
mimetypes.types_map['.gz'] = 'application/x-gzip'
|
||||
|
||||
|
||||
def serve_file(path, content_type=None, disposition=None, name=None,
|
||||
debug=False):
|
||||
"""Set status, headers, and body in order to serve the given path.
|
||||
|
||||
|
||||
The Content-Type header will be set to the content_type arg, if provided.
|
||||
If not provided, the Content-Type will be guessed by the file extension
|
||||
of the 'path' argument.
|
||||
|
||||
|
||||
If disposition is not None, the Content-Disposition header will be set
|
||||
to "<disposition>; filename=<name>". If name is None, it will be set
|
||||
to the basename of path. If disposition is None, no Content-Disposition
|
||||
header will be written.
|
||||
"""
|
||||
|
||||
|
||||
response = cherrypy.serving.response
|
||||
|
||||
|
||||
# If path is relative, users should fix it by making path absolute.
|
||||
# That is, CherryPy should not guess where the application root is.
|
||||
# It certainly should *not* use cwd (since CP may be invoked from a
|
||||
@@ -45,26 +46,26 @@ def serve_file(path, content_type=None, disposition=None, name=None, debug=False
|
||||
if debug:
|
||||
cherrypy.log(msg, 'TOOLS.STATICFILE')
|
||||
raise ValueError(msg)
|
||||
|
||||
|
||||
try:
|
||||
st = os.stat(path)
|
||||
except OSError:
|
||||
if debug:
|
||||
cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
|
||||
raise cherrypy.NotFound()
|
||||
|
||||
|
||||
# Check if path is a directory.
|
||||
if stat.S_ISDIR(st.st_mode):
|
||||
# Let the caller deal with it as they like.
|
||||
if debug:
|
||||
cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
|
||||
raise cherrypy.NotFound()
|
||||
|
||||
|
||||
# Set the Last-Modified response header, so that
|
||||
# modified-since validation code can work.
|
||||
response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
|
||||
cptools.validate_since()
|
||||
|
||||
|
||||
if content_type is None:
|
||||
# Set content-type based on filename extension
|
||||
ext = ""
|
||||
@@ -76,7 +77,7 @@ def serve_file(path, content_type=None, disposition=None, name=None, debug=False
|
||||
response.headers['Content-Type'] = content_type
|
||||
if debug:
|
||||
cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')
|
||||
|
||||
|
||||
cd = None
|
||||
if disposition is not None:
|
||||
if name is None:
|
||||
@@ -85,19 +86,20 @@ def serve_file(path, content_type=None, disposition=None, name=None, debug=False
|
||||
response.headers["Content-Disposition"] = cd
|
||||
if debug:
|
||||
cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')
|
||||
|
||||
|
||||
# Set Content-Length and use an iterable (file object)
|
||||
# this way CP won't load the whole file in memory
|
||||
content_length = st.st_size
|
||||
fileobj = open(path, 'rb')
|
||||
return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
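
Usage sketch, not part of the diff: serving a download from a handler with serve_file(). The directory is hypothetical, and the path is built absolute as the comment above requires.

    import os
    import cherrypy
    from cherrypy.lib.static import serve_file

    class Downloads(object):
        @cherrypy.expose
        def paste(self, name="example.txt"):
            path = os.path.join("/var/lib/0bin/pastes", os.path.basename(name))
            return serve_file(path, disposition="attachment", name=name)
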


def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
@@ -110,9 +112,9 @@ def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """

    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
@@ -127,12 +129,12 @@ def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
@@ -142,13 +144,14 @@ def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
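
A minimal usage sketch (not from the diff): pass an already-open binary file object; with a real file descriptor, the os.fstat() call above can fill in Content-Length and Last-Modified. The path and handler are hypothetical.

import cherrypy
from cherrypy.lib.static import serve_fileobj

class Logs(object):
    @cherrypy.expose
    def latest(self):
        # Hypothetical path; opened in binary mode so the served chunks are bytes.
        fileobj = open('/var/log/myapp/latest.log', 'rb')
        return serve_fileobj(fileobj, content_type='text/plain',
                             disposition='inline', name='latest.log')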


def _serve_fileobj(fileobj, content_type, content_length, debug=False):
    """Internal. Set response.body to the given file object, perhaps ranged."""
    response = cherrypy.serving.response

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    request = cherrypy.serving.request
    if request.protocol >= (1, 1):
@@ -156,11 +159,12 @@ def _serve_fileobj(fileobj, content_type, content_length, debug=False):
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        if r == []:
            response.headers['Content-Range'] = "bytes */%s" % content_length
            message = "Invalid Range (first-byte-pos greater than Content-Length)"
            message = ("Invalid Range (first-byte-pos greater than "
                       "Content-Length)")
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Return a single-part response.
@@ -169,8 +173,9 @@ def _serve_fileobj(fileobj, content_type, content_length, debug=False):
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
                                 'TOOLS.STATIC')
                    cherrypy.log(
                        'Single part; start: %r, stop: %r' % (start, stop),
                        'TOOLS.STATIC')
                response.status = "206 Partial Content"
                response.headers['Content-Range'] = (
                    "bytes %s-%s/%s" % (start, stop - 1, content_length))
@@ -182,36 +187,42 @@ def _serve_fileobj(fileobj, content_type, content_length, debug=False):
                response.status = "206 Partial Content"
                try:
                    # Python 3
                    from email.generator import _make_boundary as choose_boundary
                    from email.generator import _make_boundary as make_boundary
                except ImportError:
                    # Python 2
                    from mimetools import choose_boundary
                boundary = choose_boundary()
                    from mimetools import choose_boundary as make_boundary
                boundary = make_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Apache compatibility:
                    yield ntob("\r\n")

                    for start, stop in r:
                        if debug:
                            cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
                                         'TOOLS.STATIC')
                            cherrypy.log(
                                'Multipart; start: %r, stop: %r' % (
                                    start, stop),
                                'TOOLS.STATIC')
                        yield ntob("--" + boundary, 'ascii')
                        yield ntob("\r\nContent-type: %s" % content_type, 'ascii')
                        yield ntob("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
                                   % (start, stop - 1, content_length), 'ascii')
                        yield ntob("\r\nContent-type: %s" % content_type,
                                   'ascii')
                        yield ntob(
                            "\r\nContent-range: bytes %s-%s/%s\r\n\r\n" % (
                                start, stop - 1, content_length),
                            'ascii')
                        fileobj.seek(start)
                        for chunk in file_generator_limited(fileobj, stop-start):
                        gen = file_generator_limited(fileobj, stop - start)
                        for chunk in gen:
                            yield chunk
                        yield ntob("\r\n")
                    # Final boundary
                    yield ntob("--" + boundary + "--", 'ascii')

                    # Apache compatibility:
                    yield ntob("\r\n")
                response.body = file_ranges()
@@ -219,13 +230,14 @@ def _serve_fileobj(fileobj, content_type, content_length, debug=False):
        else:
            if debug:
                cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body
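
To illustrate the Range handling above with a client-side sketch (standard library only; the URL and file are hypothetical): a single requested range produces a 206 response with a Content-Range header, while several ranges produce the multipart/byteranges body built by file_ranges().

from urllib.request import Request, urlopen

req = Request('http://127.0.0.1:8080/static/report.pdf',
              headers={'Range': 'bytes=0-99'})
with urlopen(req) as resp:
    print(resp.status)                    # expected: 206
    print(resp.headers['Content-Range'])  # e.g. "bytes 0-99/12345"
    first_chunk = resp.read()             # the first 100 bytes of the file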


def serve_download(path, name=None):
    """Serve 'path' as an application/x-download attachment."""
    # This is such a common idiom I felt it deserved its own wrapper.
@@ -252,20 +264,21 @@ def _attempt(filename, content_types, debug=False):
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False


def staticdir(section, dir, root="", match="", content_types=None, index="",
              debug=False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
@@ -277,13 +290,13 @@ def staticdir(section, dir, root="", match="", content_types=None, index="",
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

@@ -295,7 +308,7 @@ def staticdir(section, dir, root="", match="", content_types=None, index="",
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
@@ -303,19 +316,19 @@ def staticdir(section, dir, root="", match="", content_types=None, index="",
    section = section.rstrip(r"\/")
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip(r"\/"))

    # If branch is "", filename will end in a slash
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' %
                     (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir.
    if not os.path.normpath(filename).startswith(os.path.normpath(dir)):
        raise cherrypy.HTTPError(403) # Forbidden
        raise cherrypy.HTTPError(403)  # Forbidden

    handled = _attempt(filename, content_types)
    if not handled:
        # Check for an index file if a folder was requested.
@@ -325,39 +338,41 @@ def staticdir(section, dir, root="", match="", content_types=None, index="",
                request.is_index = filename[-1] in (r"\/")
    return handled
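
For context (a sketch, not part of the patch): staticdir is usually switched on through application config rather than called directly. The directory names below are hypothetical; 'root' must be absolute and 'dir' is joined onto it, mirroring the checks above.

import cherrypy

class Root(object):
    @cherrypy.expose
    def index(self):
        return 'Files are served from /static/ by tools.staticdir'

config = {
    '/static': {
        'tools.staticdir.on': True,
        'tools.staticdir.root': '/var/www/myapp',
        'tools.staticdir.dir': 'static',
        'tools.staticdir.index': 'index.html',
    }
}

if __name__ == '__main__':
    cherrypy.quickstart(Root(), '/', config)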


def staticfile(filename, root=None, match="", content_types=None, debug=False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICFILE')
        return False

    # If filename is relative, make absolute using "root".
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % filename
            msg = "Static tool requires an absolute filename (got '%s')." % (
                filename,)
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
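
Likewise a config sketch (hypothetical paths): staticfile maps a single URL to one file, and the filename must be absolute unless tools.staticfile.root is also set, matching the check above.

config = {
    '/favicon.ico': {
        'tools.staticfile.on': True,
        'tools.staticfile.filename': '/var/www/myapp/static/favicon.ico',
    }
}
# Passed as the third argument to cherrypy.quickstart(app, '/', config).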

@@ -3,6 +3,7 @@ import sys
import cherrypy
from cherrypy._cpcompat import ntob


def get_xmlrpclib():
    try:
        import xmlrpc.client as x
@@ -10,6 +11,7 @@ def get_xmlrpclib():
        import xmlrpclib as x
    return x


def process_body():
    """Return (params, method) from request body."""
    try:
@@ -48,8 +50,8 @@ def respond(body, encoding='utf-8', allow_none=0):
                                  encoding=encoding,
                                  allow_none=allow_none))


def on_error(*args, **kwargs):
    body = str(sys.exc_info()[1])
    xmlrpclib = get_xmlrpclib()
    _set_response(xmlrpclib.dumps(xmlrpclib.Fault(1, body)))
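
As a small illustration of the fault path (assuming the Python 3 xmlrpc.client module that get_xmlrpclib() prefers), dumps() of a Fault yields the <methodResponse> XML that on_error() sends back to the client.

import xmlrpc.client as xmlrpclib

payload = xmlrpclib.dumps(xmlrpclib.Fault(1, "example failure"))
print(payload)  # methodResponse XML carrying faultCode 1 and the message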