Convert watch page to flask framework
233
python/werkzeug/__init__.py
Normal file
@@ -0,0 +1,233 @@
# -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~

Werkzeug is the Swiss Army knife of Python web development.

It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import sys
from types import ModuleType

__version__ = "0.15.4"

# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.

# import mapping to objects in other modules
all_by_module = {
    "werkzeug.debug": ["DebuggedApplication"],
    "werkzeug.local": [
        "Local",
        "LocalManager",
        "LocalProxy",
        "LocalStack",
        "release_local",
    ],
    "werkzeug.serving": ["run_simple"],
    "werkzeug.test": ["Client", "EnvironBuilder", "create_environ", "run_wsgi_app"],
    "werkzeug.testapp": ["test_app"],
    "werkzeug.exceptions": ["abort", "Aborter"],
    "werkzeug.urls": [
        "url_decode",
        "url_encode",
        "url_quote",
        "url_quote_plus",
        "url_unquote",
        "url_unquote_plus",
        "url_fix",
        "Href",
        "iri_to_uri",
        "uri_to_iri",
    ],
    "werkzeug.formparser": ["parse_form_data"],
    "werkzeug.utils": [
        "escape",
        "environ_property",
        "append_slash_redirect",
        "redirect",
        "cached_property",
        "import_string",
        "dump_cookie",
        "parse_cookie",
        "unescape",
        "format_string",
        "find_modules",
        "header_property",
        "html",
        "xhtml",
        "HTMLBuilder",
        "validate_arguments",
        "ArgumentValidationError",
        "bind_arguments",
        "secure_filename",
    ],
    "werkzeug.wsgi": [
        "get_current_url",
        "get_host",
        "pop_path_info",
        "peek_path_info",
        "ClosingIterator",
        "FileWrapper",
        "make_line_iter",
        "LimitedStream",
        "responder",
        "wrap_file",
        "extract_path_info",
    ],
    "werkzeug.datastructures": [
        "MultiDict",
        "CombinedMultiDict",
        "Headers",
        "EnvironHeaders",
        "ImmutableList",
        "ImmutableDict",
        "ImmutableMultiDict",
        "TypeConversionDict",
        "ImmutableTypeConversionDict",
        "Accept",
        "MIMEAccept",
        "CharsetAccept",
        "LanguageAccept",
        "RequestCacheControl",
        "ResponseCacheControl",
        "ETags",
        "HeaderSet",
        "WWWAuthenticate",
        "Authorization",
        "FileMultiDict",
        "CallbackDict",
        "FileStorage",
        "OrderedMultiDict",
        "ImmutableOrderedMultiDict",
    ],
    "werkzeug.useragents": ["UserAgent"],
    "werkzeug.http": [
        "parse_etags",
        "parse_date",
        "http_date",
        "cookie_date",
        "parse_cache_control_header",
        "is_resource_modified",
        "parse_accept_header",
        "parse_set_header",
        "quote_etag",
        "unquote_etag",
        "generate_etag",
        "dump_header",
        "parse_list_header",
        "parse_dict_header",
        "parse_authorization_header",
        "parse_www_authenticate_header",
        "remove_entity_headers",
        "is_entity_header",
        "remove_hop_by_hop_headers",
        "parse_options_header",
        "dump_options_header",
        "is_hop_by_hop_header",
        "unquote_header_value",
        "quote_header_value",
        "HTTP_STATUS_CODES",
    ],
    "werkzeug.wrappers": [
        "BaseResponse",
        "BaseRequest",
        "Request",
        "Response",
        "AcceptMixin",
        "ETagRequestMixin",
        "ETagResponseMixin",
        "ResponseStreamMixin",
        "CommonResponseDescriptorsMixin",
        "UserAgentMixin",
        "AuthorizationMixin",
        "WWWAuthenticateMixin",
        "CommonRequestDescriptorsMixin",
    ],
    "werkzeug.middleware.dispatcher": ["DispatcherMiddleware"],
    "werkzeug.middleware.shared_data": ["SharedDataMiddleware"],
    "werkzeug.security": ["generate_password_hash", "check_password_hash"],
    # the undocumented easteregg ;-)
    "werkzeug._internal": ["_easteregg"],
}

# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(["exceptions", "routing"])

object_origins = {}
for module, items in all_by_module.items():
    for item in items:
        object_origins[item] = module


class module(ModuleType):
    """Automatically import objects from the modules."""

    def __getattr__(self, name):
        if name in object_origins:
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        elif name in attribute_modules:
            __import__("werkzeug." + name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        """Just show what we want to show."""
        result = list(new_module.__all__)
        result.extend(
            (
                "__file__",
                "__doc__",
                "__all__",
                "__docformat__",
                "__name__",
                "__path__",
                "__package__",
                "__version__",
            )
        )
        return result


# keep a reference to this module so that it's not garbage collected
old_module = sys.modules["werkzeug"]


# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules["werkzeug"] = module("werkzeug")
new_module.__dict__.update(
    {
        "__file__": __file__,
        "__package__": "werkzeug",
        "__path__": __path__,
        "__doc__": __doc__,
        "__version__": __version__,
        "__all__": tuple(object_origins) + tuple(attribute_modules),
        "__docformat__": "restructuredtext en",
    }
)


# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__("werkzeug.exceptions")
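
# A minimal usage sketch (illustrative only; the values below are made up).
# Attribute access on the patched module goes through module.__getattr__
# above and imports the implementing module on first use:
#
#     >>> import werkzeug
#     >>> resp = werkzeug.Response("Hello")  # lazily imports werkzeug.wrappers
#     >>> "Response" in dir(werkzeug)
#     True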
219
python/werkzeug/_compat.py
Normal file
@@ -0,0 +1,219 @@
# flake8: noqa
# This whole file is full of lint errors
import functools
import operator
import sys

try:
    import builtins
except ImportError:
    import __builtin__ as builtins


PY2 = sys.version_info[0] == 2
WIN = sys.platform.startswith("win")

_identity = lambda x: x

if PY2:
    unichr = unichr
    text_type = unicode
    string_types = (str, unicode)
    integer_types = (int, long)

    iterkeys = lambda d, *args, **kwargs: d.iterkeys(*args, **kwargs)
    itervalues = lambda d, *args, **kwargs: d.itervalues(*args, **kwargs)
    iteritems = lambda d, *args, **kwargs: d.iteritems(*args, **kwargs)

    iterlists = lambda d, *args, **kwargs: d.iterlists(*args, **kwargs)
    iterlistvalues = lambda d, *args, **kwargs: d.iterlistvalues(*args, **kwargs)

    int_to_byte = chr
    iter_bytes = iter

    import collections as collections_abc

    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")

    def fix_tuple_repr(obj):
        def __repr__(self):
            cls = self.__class__
            return "%s(%s)" % (
                cls.__name__,
                ", ".join(
                    "%s=%r" % (field, self[index])
                    for index, field in enumerate(cls._fields)
                ),
            )

        obj.__repr__ = __repr__
        return obj

    def implements_iterator(cls):
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
        return cls

    def native_string_result(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs).encode("utf-8")

        return functools.update_wrapper(wrapper, func)

    def implements_bool(cls):
        cls.__nonzero__ = cls.__bool__
        del cls.__bool__
        return cls

    from itertools import imap, izip, ifilter

    range_type = xrange

    from StringIO import StringIO
    from cStringIO import StringIO as BytesIO

    NativeStringIO = BytesIO

    def make_literal_wrapper(reference):
        return _identity

    def normalize_string_tuple(tup):
        """Normalizes a string tuple to a common type. Following Python 2
        rules, upgrades to unicode are implicit.
        """
        if any(isinstance(x, text_type) for x in tup):
            return tuple(to_unicode(x) for x in tup)
        return tup

    def try_coerce_native(s):
        """Try to coerce a unicode string to native if possible. Otherwise,
        leave it as unicode.
        """
        try:
            return to_native(s)
        except UnicodeError:
            return s

    wsgi_get_bytes = _identity

    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        return s.decode(charset, errors)

    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
        if isinstance(s, bytes):
            return s
        return s.encode(charset, errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError("Expected bytes")

    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None or isinstance(x, str):
            return x
        return x.encode(charset, errors)


else:
    unichr = chr
    text_type = str
    string_types = (str,)
    integer_types = (int,)

    iterkeys = lambda d, *args, **kwargs: iter(d.keys(*args, **kwargs))
    itervalues = lambda d, *args, **kwargs: iter(d.values(*args, **kwargs))
    iteritems = lambda d, *args, **kwargs: iter(d.items(*args, **kwargs))

    iterlists = lambda d, *args, **kwargs: iter(d.lists(*args, **kwargs))
    iterlistvalues = lambda d, *args, **kwargs: iter(d.listvalues(*args, **kwargs))

    int_to_byte = operator.methodcaller("to_bytes", 1, "big")
    iter_bytes = functools.partial(map, int_to_byte)

    import collections.abc as collections_abc

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    fix_tuple_repr = _identity
    implements_iterator = _identity
    implements_to_string = _identity
    implements_bool = _identity
    native_string_result = _identity
    imap = map
    izip = zip
    ifilter = filter
    range_type = range

    from io import StringIO, BytesIO

    NativeStringIO = StringIO

    _latin1_encode = operator.methodcaller("encode", "latin1")

    def make_literal_wrapper(reference):
        if isinstance(reference, text_type):
            return _identity
        return _latin1_encode

    def normalize_string_tuple(tup):
        """Ensures that all types in the tuple are either strings
        or bytes.
        """
        tupiter = iter(tup)
        is_text = isinstance(next(tupiter, None), text_type)
        for arg in tupiter:
            if isinstance(arg, text_type) != is_text:
                raise TypeError(
                    "Cannot mix str and bytes arguments (got %s)" % repr(tup)
                )
        return tup

    try_coerce_native = _identity
    wsgi_get_bytes = _latin1_encode

    def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
        return s.encode("latin1").decode(charset, errors)

    def wsgi_encoding_dance(s, charset="utf-8", errors="replace"):
        if isinstance(s, text_type):
            s = s.encode(charset)
        return s.decode("latin1", errors)

    def to_bytes(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None:
            return None
        if isinstance(x, (bytes, bytearray, memoryview)):  # noqa
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError("Expected bytes")

    def to_native(x, charset=sys.getdefaultencoding(), errors="strict"):
        if x is None or isinstance(x, str):
            return x
        return x.decode(charset, errors)


def to_unicode(
    x, charset=sys.getdefaultencoding(), errors="strict", allow_none_charset=False
):
    if x is None:
        return None
    if not isinstance(x, bytes):
        return text_type(x)
    if charset is None and allow_none_charset:
        return x
    return x.decode(charset, errors)
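
# A short usage sketch, assuming the Python 3 branch above is active; the
# inputs are made-up values:
#
#     >>> to_bytes(u"caf\xe9", charset="utf-8")
#     b'caf\xc3\xa9'
#     >>> to_unicode(b"caf\xc3\xa9", charset="utf-8")
#     'café'
#     >>> to_native(b"abc", charset="ascii")  # bytes become native str on Python 3
#     'abc'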
484
python/werkzeug/_internal.py
Normal file
@@ -0,0 +1,484 @@
# -*- coding: utf-8 -*-
"""
werkzeug._internal
~~~~~~~~~~~~~~~~~~

This module provides internally used helpers and constants.

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import inspect
import logging
import re
import string
from datetime import date
from datetime import datetime
from itertools import chain
from weakref import WeakKeyDictionary

from ._compat import int_to_byte
from ._compat import integer_types
from ._compat import iter_bytes
from ._compat import range_type
from ._compat import text_type


_logger = None
_signature_cache = WeakKeyDictionary()
_epoch_ord = date(1970, 1, 1).toordinal()
_cookie_params = {
    b"expires",
    b"path",
    b"comment",
    b"max-age",
    b"secure",
    b"httponly",
    b"version",
}
_legal_cookie_chars = (
    string.ascii_letters + string.digits + u"/=!#$%&'*+-.^_`|~:"
).encode("ascii")

_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
for _i in chain(range_type(32), range_type(127, 256)):
    _cookie_quoting_map[int_to_byte(_i)] = ("\\%03o" % _i).encode("latin1")

_octal_re = re.compile(br"\\[0-3][0-7][0-7]")
_quote_re = re.compile(br"[\\].")
_legal_cookie_chars_re = br"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
_cookie_re = re.compile(
    br"""
    (?P<key>[^=;]+)
    (?:\s*=\s*
        (?P<val>
            "(?:[^\\"]|\\.)*" |
             (?:.*?)
        )
    )?
    \s*;
    """,
    flags=re.VERBOSE,
)


class _Missing(object):
    def __repr__(self):
        return "no value"

    def __reduce__(self):
        return "_missing"


_missing = _Missing()


def _get_environ(obj):
    env = getattr(obj, "environ", obj)
    assert isinstance(env, dict), (
        "%r is not a WSGI environment (has to be a dict)" % type(obj).__name__
    )
    return env


def _has_level_handler(logger):
    """Check if there is a handler in the logging chain that will handle
    the given logger's effective level.
    """
    level = logger.getEffectiveLevel()
    current = logger

    while current:
        if any(handler.level <= level for handler in current.handlers):
            return True

        if not current.propagate:
            break

        current = current.parent

    return False


def _log(type, message, *args, **kwargs):
    """Log a message to the 'werkzeug' logger.

    The logger is created the first time it is needed. If there is no
    level set, it is set to :data:`logging.INFO`. If there is no handler
    for the logger's effective level, a :class:`logging.StreamHandler`
    is added.
    """
    global _logger

    if _logger is None:
        _logger = logging.getLogger("werkzeug")

        if _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)

        if not _has_level_handler(_logger):
            _logger.addHandler(logging.StreamHandler())

    getattr(_logger, type)(message.rstrip(), *args, **kwargs)


def _parse_signature(func):
    """Return a signature object for the function."""
    if hasattr(func, "im_func"):
        func = func.im_func

    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse

    # inspect the function signature and collect all the information
    if hasattr(inspect, "getfullargspec"):
        tup = inspect.getfullargspec(func)
    else:
        tup = inspect.getargspec(func)
    positional, vararg_var, kwarg_var, defaults = tup[:4]
    defaults = defaults or ()
    arg_count = len(positional)
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError(
                "cannot parse functions that unpack tuples in the function signature"
            )
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)

    def parse(args, kwargs):
        new_args = []
        missing = []
        extra = {}

        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                if name in kwargs:
                    extra[name] = kwargs.pop(name)

        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        if kwargs and kwarg_var is None:
            extra.update(kwargs)
            kwargs = {}

        return (
            new_args,
            kwargs,
            missing,
            extra,
            extra_positional,
            arguments,
            vararg_var,
            kwarg_var,
        )

    _signature_cache[func] = parse
    return parse


def _date_to_unix(arg):
    """Converts a timetuple, integer or datetime object into the seconds from
    epoch in utc.
    """
    if isinstance(arg, datetime):
        arg = arg.utctimetuple()
    elif isinstance(arg, integer_types + (float,)):
        return int(arg)
    year, month, day, hour, minute, second = arg[:6]
    days = date(year, month, 1).toordinal() - _epoch_ord + day - 1
    hours = days * 24 + hour
    minutes = hours * 60 + minute
    seconds = minutes * 60 + second
    return seconds
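
# Worked example of the arithmetic above (hypothetical input): for
# arg = (1970, 1, 2, 0, 0, 0) the ordinal difference gives days = 1, so
# hours = 24, minutes = 1440 and seconds = 86400, exactly one day after
# the epoch.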


class _DictAccessorProperty(object):
    """Baseclass for `environ_property` and `header_property`."""

    read_only = False

    def __init__(
        self,
        name,
        default=None,
        load_func=None,
        dump_func=None,
        read_only=None,
        doc=None,
    ):
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        storage = self.lookup(obj)
        if self.name not in storage:
            return self.default
        rv = storage[self.name]
        if self.load_func is not None:
            try:
                rv = self.load_func(rv)
            except (ValueError, TypeError):
                rv = self.default
        return rv

    def __set__(self, obj, value):
        if self.read_only:
            raise AttributeError("read only property")
        if self.dump_func is not None:
            value = self.dump_func(value)
        self.lookup(obj)[self.name] = value

    def __delete__(self, obj):
        if self.read_only:
            raise AttributeError("read only property")
        self.lookup(obj).pop(self.name, None)

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.name)


def _cookie_quote(b):
    buf = bytearray()
    all_legal = True
    _lookup = _cookie_quoting_map.get
    _push = buf.extend

    for char in iter_bytes(b):
        if char not in _legal_cookie_chars:
            all_legal = False
            char = _lookup(char, char)
        _push(char)

    if all_legal:
        return bytes(buf)
    return bytes(b'"' + buf + b'"')


def _cookie_unquote(b):
    if len(b) < 2:
        return b
    if b[:1] != b'"' or b[-1:] != b'"':
        return b

    b = b[1:-1]

    i = 0
    n = len(b)
    rv = bytearray()
    _push = rv.extend

    while 0 <= i < n:
        o_match = _octal_re.search(b, i)
        q_match = _quote_re.search(b, i)
        if not o_match and not q_match:
            rv.extend(b[i:])
            break
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):
            _push(b[i:k])
            _push(b[k + 1 : k + 2])
            i = k + 2
        else:
            _push(b[i:j])
            rv.append(int(b[j + 1 : j + 4], 8))
            i = j + 4

    return bytes(rv)
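
# A round-trip sketch for the two helpers above, with made-up cookie bytes;
# the space stays literal while ";" is octal-escaped per the quoting map:
#
#     >>> _cookie_quote(b"hello world;")
#     b'"hello world\\073"'
#     >>> _cookie_unquote(b'"hello world\\073"')
#     b'hello world;'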


def _cookie_parse_impl(b):
    """Lowlevel cookie parsing facility that operates on bytes."""
    i = 0
    n = len(b)

    while i < n:
        match = _cookie_re.search(b + b";", i)
        if not match:
            break

        key = match.group("key").strip()
        value = match.group("val") or b""
        i = match.end(0)

        # Ignore parameters. We have no interest in them.
        if key.lower() not in _cookie_params:
            yield _cookie_unquote(key), _cookie_unquote(value)


def _encode_idna(domain):
    # If we're given bytes, make sure they fit into ASCII
    if not isinstance(domain, text_type):
        domain.decode("ascii")
        return domain

    # Otherwise check if it's already ascii, then return
    try:
        return domain.encode("ascii")
    except UnicodeError:
        pass

    # Otherwise encode each part separately
    parts = domain.split(".")
    for idx, part in enumerate(parts):
        parts[idx] = part.encode("idna")
    return b".".join(parts)


def _decode_idna(domain):
    # If the input is a string try to encode it to ascii to
    # do the idna decoding. if that fails because of an
    # unicode error, then we already have a decoded idna domain
    if isinstance(domain, text_type):
        try:
            domain = domain.encode("ascii")
        except UnicodeError:
            return domain

    # Decode each part separately. If a part fails, try to
    # decode it with ascii and silently ignore errors. This makes
    # most sense because the idna codec does not have error handling
    parts = domain.split(b".")
    for idx, part in enumerate(parts):
        try:
            parts[idx] = part.decode("idna")
        except UnicodeError:
            parts[idx] = part.decode("ascii", "ignore")

    return ".".join(parts)


def _make_cookie_domain(domain):
    if domain is None:
        return None
    domain = _encode_idna(domain)
    if b":" in domain:
        domain = domain.split(b":", 1)[0]
    if b"." in domain:
        return domain
    raise ValueError(
        "Setting 'domain' for a cookie on a server running locally (ex: "
        "localhost) is not supported by complying browsers. You should "
        "have something like: '127.0.0.1 localhost dev.localhost' on "
        "your hosts file and then point your server to run on "
        "'dev.localhost' and also set 'domain' for 'dev.localhost'"
    )


def _easteregg(app=None):
    """Like the name says. But who knows how it works?"""

    def bzzzzzzz(gyver):
        import base64
        import zlib

        return zlib.decompress(base64.b64decode(gyver)).decode("ascii")

    gyver = u"\n".join(
        [
            x + (77 - len(x)) * u" "
            for x in bzzzzzzz(
                b"""
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t"""
            ).splitlines()
        ]
    )

    def easteregged(environ, start_response):
        def injecting_start_response(status, headers, exc_info=None):
            headers.append(("X-Powered-By", "Werkzeug"))
            return start_response(status, headers, exc_info)

        if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
            return app(environ, injecting_start_response)
        injecting_start_response("200 OK", [("Content-Type", "text/html")])
        return [
            (
                u"""
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
  body { font: 15px Georgia, serif; text-align: center; }
  a { color: #333; text-decoration: none; }
  h1 { font-size: 30px; margin: 20px 0 10px 0; }
  p { margin: 0 0 30px 0; }
  pre { font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>%s\n\n\n</pre>
</body>
</html>"""
                % gyver
            ).encode("latin1")
        ]

    return easteregged
334
python/werkzeug/_reloader.py
Normal file
@@ -0,0 +1,334 @@
import os
import subprocess
import sys
import threading
import time
from itertools import chain

from ._compat import iteritems
from ._compat import PY2
from ._compat import text_type
from ._internal import _log


def _iter_module_files():
    """This iterates over all relevant Python files. It goes through all
    loaded files from modules, all files in folders of already loaded modules
    as well as all files reachable through a package.
    """
    # The list call is necessary on Python 3 in case the module
    # dictionary modifies during iteration.
    for module in list(sys.modules.values()):
        if module is None:
            continue
        filename = getattr(module, "__file__", None)
        if filename:
            if os.path.isdir(filename) and os.path.exists(
                os.path.join(filename, "__init__.py")
            ):
                filename = os.path.join(filename, "__init__.py")

            old = None
            while not os.path.isfile(filename):
                old = filename
                filename = os.path.dirname(filename)
                if filename == old:
                    break
            else:
                if filename[-4:] in (".pyc", ".pyo"):
                    filename = filename[:-1]
                yield filename


def _find_observable_paths(extra_files=None):
    """Finds all paths that should be observed."""
    rv = set(
        os.path.dirname(os.path.abspath(x)) if os.path.isfile(x) else os.path.abspath(x)
        for x in sys.path
    )

    for filename in extra_files or ():
        rv.add(os.path.dirname(os.path.abspath(filename)))

    for module in list(sys.modules.values()):
        fn = getattr(module, "__file__", None)
        if fn is None:
            continue
        fn = os.path.abspath(fn)
        rv.add(os.path.dirname(fn))

    return _find_common_roots(rv)


def _get_args_for_reloading():
    """Returns the executable. This contains a workaround for windows
    if the executable is incorrectly reported to not have the .exe
    extension which can cause bugs on reloading. This also contains
    a workaround for linux where the file is executable (possibly with
    a program other than python)
    """
    rv = [sys.executable]
    py_script = os.path.abspath(sys.argv[0])
    args = sys.argv[1:]
    # Need to look at main module to determine how it was executed.
    __main__ = sys.modules["__main__"]

    if __main__.__package__ is None:
        # Executed a file, like "python app.py".
        if os.name == "nt":
            # Windows entry points have ".exe" extension and should be
            # called directly.
            if not os.path.exists(py_script) and os.path.exists(py_script + ".exe"):
                py_script += ".exe"

            if (
                os.path.splitext(rv[0])[1] == ".exe"
                and os.path.splitext(py_script)[1] == ".exe"
            ):
                rv.pop(0)

        elif os.path.isfile(py_script) and os.access(py_script, os.X_OK):
            # The file is marked as executable. Nix adds a wrapper that
            # shouldn't be called with the Python executable.
            rv.pop(0)

        rv.append(py_script)
    else:
        # Executed a module, like "python -m werkzeug.serving".
        if sys.argv[0] == "-m":
            # Flask works around previous behavior by putting
            # "-m flask" in sys.argv.
            # TODO remove this once Flask no longer misbehaves
            args = sys.argv
        else:
            py_module = __main__.__package__
            name = os.path.splitext(os.path.basename(py_script))[0]

            if name != "__main__":
                py_module += "." + name

            rv.extend(("-m", py_module.lstrip(".")))

    rv.extend(args)
    return rv


def _find_common_roots(paths):
    """Out of some paths it finds the common roots that need monitoring."""
    paths = [x.split(os.path.sep) for x in paths]
    root = {}
    for chunks in sorted(paths, key=len, reverse=True):
        node = root
        for chunk in chunks:
            node = node.setdefault(chunk, {})
        node.clear()

    rv = set()

    def _walk(node, path):
        for prefix, child in iteritems(node):
            _walk(child, path + (prefix,))
        if not node:
            rv.add("/".join(path))

    _walk(root, ())
    return rv
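
# Example of the collapsing behaviour (hypothetical POSIX paths):
# _find_common_roots({"/proj/app", "/proj/app/views", "/usr/lib"}) returns
# {"/proj/app", "/usr/lib"}, since a recursive watch on /proj/app already
# covers its subdirectories.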


class ReloaderLoop(object):
    name = None

    # monkeypatched by testsuite. wrapping with `staticmethod` is required in
    # case time.sleep has been replaced by a non-c function (e.g. by
    # `eventlet.monkey_patch`) before we get here
    _sleep = staticmethod(time.sleep)

    def __init__(self, extra_files=None, interval=1):
        self.extra_files = set(os.path.abspath(x) for x in extra_files or ())
        self.interval = interval

    def run(self):
        pass

    def restart_with_reloader(self):
        """Spawn a new Python interpreter with the same arguments as this one,
        but running the reloader thread.
        """
        while 1:
            _log("info", " * Restarting with %s" % self.name)
            args = _get_args_for_reloading()

            # a weird bug on windows. sometimes unicode strings end up in the
            # environment and subprocess.call does not like this, encode them
            # to latin1 and continue.
            if os.name == "nt" and PY2:
                new_environ = {}
                for key, value in iteritems(os.environ):
                    if isinstance(key, text_type):
                        key = key.encode("iso-8859-1")
                    if isinstance(value, text_type):
                        value = value.encode("iso-8859-1")
                    new_environ[key] = value
            else:
                new_environ = os.environ.copy()

            new_environ["WERKZEUG_RUN_MAIN"] = "true"
            exit_code = subprocess.call(args, env=new_environ, close_fds=False)
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename):
        self.log_reload(filename)
        sys.exit(3)

    def log_reload(self, filename):
        filename = os.path.abspath(filename)
        _log("info", " * Detected change in %r, reloading" % filename)


class StatReloaderLoop(ReloaderLoop):
    name = "stat"

    def run(self):
        mtimes = {}
        while 1:
            for filename in chain(_iter_module_files(), self.extra_files):
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    continue

                old_time = mtimes.get(filename)
                if old_time is None:
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    self.trigger_reload(filename)
            self._sleep(self.interval)


class WatchdogReloaderLoop(ReloaderLoop):
    def __init__(self, *args, **kwargs):
        ReloaderLoop.__init__(self, *args, **kwargs)
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler

        self.observable_paths = set()

        def _check_modification(filename):
            if filename in self.extra_files:
                self.trigger_reload(filename)
            dirname = os.path.dirname(filename)
            if dirname.startswith(tuple(self.observable_paths)):
                if filename.endswith((".pyc", ".pyo", ".py")):
                    self.trigger_reload(filename)

        class _CustomHandler(FileSystemEventHandler):
            def on_created(self, event):
                _check_modification(event.src_path)

            def on_modified(self, event):
                _check_modification(event.src_path)

            def on_moved(self, event):
                _check_modification(event.src_path)
                _check_modification(event.dest_path)

            def on_deleted(self, event):
                _check_modification(event.src_path)

        reloader_name = Observer.__name__.lower()
        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]
        reloader_name += " reloader"

        self.name = reloader_name

        self.observer_class = Observer
        self.event_handler = _CustomHandler()
        self.should_reload = False

    def trigger_reload(self, filename):
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def run(self):
        watches = {}
        observer = self.observer_class()
        observer.start()

        try:
            while not self.should_reload:
                to_delete = set(watches)
                paths = _find_observable_paths(self.extra_files)
                for path in paths:
                    if path not in watches:
                        try:
                            watches[path] = observer.schedule(
                                self.event_handler, path, recursive=True
                            )
                        except OSError:
                            # Clear this path from list of watches We don't want
                            # the same error message showing again in the next
                            # iteration.
                            watches[path] = None
                    to_delete.discard(path)
                for path in to_delete:
                    watch = watches.pop(path, None)
                    if watch is not None:
                        observer.unschedule(watch)
                self.observable_paths = paths
                self._sleep(self.interval)
        finally:
            observer.stop()
            observer.join()

        sys.exit(3)


reloader_loops = {"stat": StatReloaderLoop, "watchdog": WatchdogReloaderLoop}

try:
    __import__("watchdog.observers")
except ImportError:
    reloader_loops["auto"] = reloader_loops["stat"]
else:
    reloader_loops["auto"] = reloader_loops["watchdog"]


def ensure_echo_on():
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after reload."""
    # tcgetattr will fail if stdin isn't a tty
    if not sys.stdin.isatty():
        return
    try:
        import termios
    except ImportError:
        return
    attributes = termios.tcgetattr(sys.stdin)
    if not attributes[3] & termios.ECHO:
        attributes[3] |= termios.ECHO
        termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)


def run_with_reloader(main_func, extra_files=None, interval=1, reloader_type="auto"):
    """Run the given function in an independent python interpreter."""
    import signal

    reloader = reloader_loops[reloader_type](extra_files, interval)
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            ensure_echo_on()
            t = threading.Thread(target=main_func, args=())
            t.setDaemon(True)
            t.start()
            reloader.run()
        else:
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
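
# A minimal usage sketch; the serve() entry point, application object and
# config path below are hypothetical, not part of this module:
#
#     >>> def serve():
#     ...     run_simple("localhost", 5000, application)
#     >>> run_with_reloader(serve, extra_files=["config.ini"], interval=1)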
16
python/werkzeug/contrib/__init__.py
Normal file
@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib
~~~~~~~~~~~~~~~~

Contains user-submitted code that other users may find useful, but which
is not part of the Werkzeug core. Anyone can write code for inclusion in
the `contrib` package. All modules in this package are distributed as an
add-on library and thus are not part of Werkzeug itself.

This file itself is mostly for informational purposes and to tell the
Python interpreter that `contrib` is a package.

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
362
python/werkzeug/contrib/atom.py
Normal file
@@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~

This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).

Example::

    def atom_feed(request):
        feed = AtomFeed("My Blog", feed_url=request.url,
                        url=request.host_url,
                        subtitle="My example blog for a feed test.")
        for post in Post.query.limit(10).all():
            feed.add(post.title, post.body, content_type='html',
                     author=post.author, url=post.url, id=post.uid,
                     updated=post.last_update, published=post.pub_date)
        return feed.get_response()

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings
from datetime import datetime

from .._compat import implements_to_string
from .._compat import string_types
from ..utils import escape
from ..wrappers import BaseResponse

warnings.warn(
    "'werkzeug.contrib.atom' is deprecated as of version 0.15 and will"
    " be removed in version 1.0.",
    DeprecationWarning,
    stacklevel=2,
)

XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"


def _make_text_block(name, content, content_type=None):
    """Helper function for the builder that creates an XML text block."""
    if content_type == "xhtml":
        return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % (
            name,
            XHTML_NAMESPACE,
            content,
            name,
        )
    if not content_type:
        return u"<%s>%s</%s>\n" % (name, escape(content), name)
    return u'<%s type="%s">%s</%s>\n' % (name, content_type, escape(content), name)


def format_iso8601(obj):
    """Format a datetime object for iso8601"""
    iso8601 = obj.isoformat()
    if obj.tzinfo:
        return iso8601
    return iso8601 + "Z"


@implements_to_string
class AtomFeed(object):

    """A helper class that creates Atom feeds.

    :param title: the title of the feed. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed. Must be a URI. If
               not present the `feed_url` is used, but one of both is
               required.
    :param updated: the time the feed was last modified. Must
                    be a :class:`datetime.datetime` object. If not
                    present the latest entry's `updated` is used.
                    Treated as UTC if naive datetime.
    :param feed_url: the URL to the feed. Should be the URL that was
                     requested.
    :param author: the author of the feed. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if not every entry has an
                   author element.
    :param icon: an icon for the feed.
    :param logo: a logo for the feed.
    :param rights: copyright information for the feed.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param subtitle: a short description of the feed.
    :param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``
                          or ``'xhtml'``. Default is ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param generator: the software that generated this feed. This must be
                      a tuple in the form ``(name, url, version)``. If
                      you don't want to specify one of them, set the item
                      to `None`.
    :param entries: a list with the entries for the feed. Entries can also
                    be added later with :meth:`add`.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    default_generator = ("Werkzeug", None, None)

    def __init__(self, title=None, entries=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get("title_type", "text")
        self.url = kwargs.get("url")
        self.feed_url = kwargs.get("feed_url", self.url)
        self.id = kwargs.get("id", self.feed_url)
        self.updated = kwargs.get("updated")
        self.author = kwargs.get("author", ())
        self.icon = kwargs.get("icon")
        self.logo = kwargs.get("logo")
        self.rights = kwargs.get("rights")
        self.rights_type = kwargs.get("rights_type")
        self.subtitle = kwargs.get("subtitle")
        self.subtitle_type = kwargs.get("subtitle_type", "text")
        self.generator = kwargs.get("generator")
        if self.generator is None:
            self.generator = self.default_generator
        self.links = kwargs.get("links", [])
        self.entries = list(entries) if entries else []

        if not hasattr(self.author, "__iter__") or isinstance(
            self.author, string_types + (dict,)
        ):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {"name": author}

        if not self.title:
            raise ValueError("title is required")
        if not self.id:
            raise ValueError("id is required")
        for author in self.author:
            if "name" not in author:
                raise TypeError("author must contain at least a name")

    def add(self, *args, **kwargs):
        """Add a new entry to the feed. This function can either be called
        with a :class:`FeedEntry` or some keyword and positional arguments
        that are forwarded to the :class:`FeedEntry` constructor.
        """
        if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
            self.entries.append(args[0])
        else:
            kwargs["feed_url"] = self.feed_url
            self.entries.append(FeedEntry(*args, **kwargs))

    def __repr__(self):
        return "<%s %r (%d entries)>" % (
            self.__class__.__name__,
            self.title,
            len(self.entries),
        )

    def generate(self):
        """Return a generator that yields pieces of XML."""
        # atom demands either an author element in every entry or a global one
        if not self.author:
            if any(not e.author for e in self.entries):
                self.author = ({"name": "Unknown author"},)

        if not self.updated:
            dates = sorted([entry.updated for entry in self.entries])
            self.updated = dates[-1] if dates else datetime.utcnow()

        yield u'<?xml version="1.0" encoding="utf-8"?>\n'
        yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
        yield "  " + _make_text_block("title", self.title, self.title_type)
        yield u"  <id>%s</id>\n" % escape(self.id)
        yield u"  <updated>%s</updated>\n" % format_iso8601(self.updated)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        if self.feed_url:
            yield u'  <link href="%s" rel="self" />\n' % escape(self.feed_url)
        for link in self.links:
            yield u"  <link %s/>\n" % "".join(
                '%s="%s" ' % (k, escape(link[k])) for k in link
            )
        for author in self.author:
            yield u"  <author>\n"
            yield u"    <name>%s</name>\n" % escape(author["name"])
            if "uri" in author:
                yield u"    <uri>%s</uri>\n" % escape(author["uri"])
            if "email" in author:
                yield "    <email>%s</email>\n" % escape(author["email"])
            yield "  </author>\n"
        if self.subtitle:
            yield "  " + _make_text_block("subtitle", self.subtitle, self.subtitle_type)
        if self.icon:
            yield u"  <icon>%s</icon>\n" % escape(self.icon)
        if self.logo:
            yield u"  <logo>%s</logo>\n" % escape(self.logo)
        if self.rights:
            yield "  " + _make_text_block("rights", self.rights, self.rights_type)
        generator_name, generator_url, generator_version = self.generator
        if generator_name or generator_url or generator_version:
            tmp = [u"  <generator"]
            if generator_url:
                tmp.append(u' uri="%s"' % escape(generator_url))
            if generator_version:
                tmp.append(u' version="%s"' % escape(generator_version))
            tmp.append(u">%s</generator>\n" % escape(generator_name))
            yield u"".join(tmp)
        for entry in self.entries:
            for line in entry.generate():
                yield u"  " + line
        yield u"</feed>\n"

    def to_string(self):
        """Convert the feed into a string."""
        return u"".join(self.generate())

    def get_response(self):
        """Return a response object for the feed."""
        return BaseResponse(self.to_string(), mimetype="application/atom+xml")

    def __call__(self, environ, start_response):
        """Use the class as WSGI response object."""
        return self.get_response()(environ, start_response)

    def __str__(self):
        return self.to_string()


@implements_to_string
class FeedEntry(object):

    """Represents a single entry in a feed.

    :param title: the title of the entry. Required.
    :param title_type: the type attribute for the title element. One of
                       ``'html'``, ``'text'`` or ``'xhtml'``.
    :param content: the content of the entry.
    :param content_type: the type attribute for the content element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param summary: a summary of the entry's content.
    :param summary_type: the type attribute for the summary element. One
                         of ``'html'``, ``'text'`` or ``'xhtml'``.
    :param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be a URI. If
               not present the URL is used, but one of both is required.
    :param updated: the time the entry was last modified. Must
                    be a :class:`datetime.datetime` object. Treated as
                    UTC if naive datetime. Required.
    :param author: the author of the entry. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can be a list of (may be
                   mixed, too) strings and dicts, too, if there are
                   multiple authors. Required if the feed does not have an
                   author element.
    :param published: the time the entry was initially published. Must
                      be a :class:`datetime.datetime` object. Treated as
                      UTC if naive datetime.
    :param rights: copyright information for the entry.
    :param rights_type: the type attribute for the rights element. One of
                        ``'html'``, ``'text'`` or ``'xhtml'``. Default is
                        ``'text'``.
    :param links: additional links. Must be a list of dictionaries with
                  href (required) and rel, type, hreflang, title, length
                  (all optional)
    :param categories: categories for the entry. Must be a list of dictionaries
                       with term (required), scheme and label (all optional)
    :param xml_base: The xml base (url) for this feed item. If not provided
                     it will default to the item url.

    For more information on the elements see
    http://www.atomenabled.org/developers/syndication/

    Everywhere where a list is demanded, any iterable can be used.
    """

    def __init__(self, title=None, content=None, feed_url=None, **kwargs):
        self.title = title
        self.title_type = kwargs.get("title_type", "text")
        self.content = content
        self.content_type = kwargs.get("content_type", "html")
        self.url = kwargs.get("url")
        self.id = kwargs.get("id", self.url)
        self.updated = kwargs.get("updated")
        self.summary = kwargs.get("summary")
        self.summary_type = kwargs.get("summary_type", "html")
        self.author = kwargs.get("author", ())
        self.published = kwargs.get("published")
        self.rights = kwargs.get("rights")
        self.links = kwargs.get("links", [])
        self.categories = kwargs.get("categories", [])
        self.xml_base = kwargs.get("xml_base", feed_url)

        if not hasattr(self.author, "__iter__") or isinstance(
            self.author, string_types + (dict,)
        ):
            self.author = [self.author]
        for i, author in enumerate(self.author):
            if not isinstance(author, dict):
                self.author[i] = {"name": author}

        if not self.title:
            raise ValueError("title is required")
        if not self.id:
            raise ValueError("id is required")
        if not self.updated:
            raise ValueError("updated is required")

    def __repr__(self):
        return "<%s %r>" % (self.__class__.__name__, self.title)

    def generate(self):
        """Yields pieces of ATOM XML."""
        base = ""
        if self.xml_base:
            base = ' xml:base="%s"' % escape(self.xml_base)
        yield u"<entry%s>\n" % base
        yield u"  " + _make_text_block("title", self.title, self.title_type)
        yield u"  <id>%s</id>\n" % escape(self.id)
        yield u"  <updated>%s</updated>\n" % format_iso8601(self.updated)
        if self.published:
            yield u"  <published>%s</published>\n" % format_iso8601(self.published)
        if self.url:
            yield u'  <link href="%s" />\n' % escape(self.url)
        for author in self.author:
            yield u"  <author>\n"
            yield u"    <name>%s</name>\n" % escape(author["name"])
            if "uri" in author:
                yield u"    <uri>%s</uri>\n" % escape(author["uri"])
            if "email" in author:
                yield u"    <email>%s</email>\n" % escape(author["email"])
            yield u"  </author>\n"
        for link in self.links:
            yield u"  <link %s/>\n" % "".join(
                '%s="%s" ' % (k, escape(link[k])) for k in link
            )
        for category in self.categories:
            yield u"  <category %s/>\n" % "".join(
                '%s="%s" ' % (k, escape(category[k])) for k in category
            )
        if self.summary:
            yield u"  " + _make_text_block("summary", self.summary, self.summary_type)
        if self.content:
            yield u"  " + _make_text_block("content", self.content, self.content_type)
        yield u"</entry>\n"

    def to_string(self):
        """Convert the feed item into a unicode object."""
        return u"".join(self.generate())

    def __str__(self):
        return self.to_string()
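
# A minimal construction sketch with made-up values, complementing the
# request-based example in the module docstring:
#
#     >>> from datetime import datetime
#     >>> feed = AtomFeed(u"Example Feed", url=u"http://example.com/",
#     ...                 feed_url=u"http://example.com/feed", author=u"Jane Doe")
#     >>> feed.add(u"First post", u"<p>Hello</p>", content_type="html",
#     ...          url=u"http://example.com/1", updated=datetime(2019, 1, 1))
#     >>> feed.to_string().startswith(u'<?xml version="1.0" encoding="utf-8"?>')
#     True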
933
python/werkzeug/contrib/cache.py
Normal file
@@ -0,0 +1,933 @@
|
||||
# -*- coding: utf-8 -*-
"""
    werkzeug.contrib.cache
    ~~~~~~~~~~~~~~~~~~~~~~

    The main problem with dynamic Web sites is, well, they're dynamic.  Each
    time a user requests a page, the webserver executes a lot of code, queries
    the database, and renders templates until the visitor gets the page they
    see.

    This is a lot more expensive than just loading a file from the file system
    and sending it to the visitor.

    For most Web applications, this overhead isn't a big deal, but once it
    becomes one, you will be glad to have a cache system in place.

    How Caching Works
    =================

    Caching is pretty simple.  Basically you have a cache object lurking around
    somewhere that is connected to a remote cache or the file system or
    something else.  When the request comes in you check if the current page
    is already in the cache and, if so, you return it from the cache.
    Otherwise you generate the page and put it into the cache (or a fragment
    of the page; you don't have to cache the full thing).

    Here is a simple example of how to cache a sidebar for 5 minutes::

        def get_sidebar(user):
            identifier = 'sidebar_for/user%d' % user.id
            value = cache.get(identifier)
            if value is not None:
                return value
            value = generate_sidebar_for(user=user)
            cache.set(identifier, value, timeout=60 * 5)
            return value

    Creating a Cache Object
    =======================

    To create a cache object you just import the cache system of your choice
    from the cache module and instantiate it.  Then you can start working
    with that object:

    >>> from werkzeug.contrib.cache import SimpleCache
    >>> c = SimpleCache()
    >>> c.set("foo", "value")
    >>> c.get("foo")
    'value'
    >>> c.get("missing") is None
    True

    Please keep in mind that you have to create the cache and put it somewhere
    you have access to it (either as a module global you can import or you just
    put it into your WSGI application).

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import errno
import os
import platform
import re
import tempfile
import warnings
from hashlib import md5
from time import time

from .._compat import integer_types
from .._compat import iteritems
from .._compat import string_types
from .._compat import text_type
from .._compat import to_native
from ..posixemulation import rename

try:
    import cPickle as pickle
except ImportError:  # pragma: no cover
    import pickle

warnings.warn(
    "'werkzeug.contrib.cache' is deprecated as of version 0.15 and will"
    " be removed in version 1.0. It has moved to https://github.com"
    "/pallets/cachelib.",
    DeprecationWarning,
    stacklevel=2,
)


def _items(mappingorseq):
    """Wrapper for efficient iteration over mappings represented by dicts
    or sequences::

        >>> for k, v in _items((i, i*i) for i in xrange(5)):
        ...     assert k*k == v

        >>> for k, v in _items(dict((i, i*i) for i in xrange(5))):
        ...     assert k*k == v

    """
    if hasattr(mappingorseq, "items"):
        return iteritems(mappingorseq)
    return mappingorseq

class BaseCache(object):
    """Base class for the cache systems.  All the cache systems implement this
    API or a superset of it.

    :param default_timeout: the default timeout (in seconds) that is used if
                            no timeout is specified on :meth:`set`. A timeout
                            of 0 indicates that the cache never expires.
    """

    def __init__(self, default_timeout=300):
        self.default_timeout = default_timeout

    def _normalize_timeout(self, timeout):
        if timeout is None:
            timeout = self.default_timeout
        return timeout

    def get(self, key):
        """Look up key in the cache and return the value for it.

        :param key: the key to be looked up.
        :returns: The value if it exists and is readable, else ``None``.
        """
        return None

    def delete(self, key):
        """Delete `key` from the cache.

        :param key: the key to delete.
        :returns: Whether the key existed and has been deleted.
        :rtype: boolean
        """
        return True

    def get_many(self, *keys):
        """Returns a list of values for the given keys.
        For each key an item in the list is created::

            foo, bar = cache.get_many("foo", "bar")

        Has the same error handling as :meth:`get`.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return [self.get(k) for k in keys]

    def get_dict(self, *keys):
        """Like :meth:`get_many` but return a dict::

            d = cache.get_dict("foo", "bar")
            foo = d["foo"]
            bar = d["bar"]

        :param keys: The function accepts multiple keys as positional
                     arguments.
        """
        return dict(zip(keys, self.get_many(*keys)))

    def set(self, key, value, timeout=None):
        """Add a new key/value to the cache (overwrites value, if key already
        exists in the cache).

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: ``True`` if key has been updated, ``False`` for backend
                  errors. Pickling errors, however, will raise a subclass of
                  ``pickle.PickleError``.
        :rtype: boolean
        """
        return True

    def add(self, key, value, timeout=None):
        """Works like :meth:`set` but does not overwrite the values of already
        existing keys.

        :param key: the key to set
        :param value: the value for the key
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Same as :meth:`set`, but also ``False`` for already
                  existing keys.
        :rtype: boolean
        """
        return True

    def set_many(self, mapping, timeout=None):
        """Sets multiple keys and values from a mapping.

        :param mapping: a mapping with the keys/values to set.
        :param timeout: the cache timeout for the key in seconds (if not
                        specified, it uses the default timeout). A timeout of
                        0 indicates that the cache never expires.
        :returns: Whether all given keys have been set.
        :rtype: boolean
        """
        rv = True
        for key, value in _items(mapping):
            if not self.set(key, value, timeout):
                rv = False
        return rv

    def delete_many(self, *keys):
        """Deletes multiple keys at once.

        :param keys: The function accepts multiple keys as positional
                     arguments.
        :returns: Whether all given keys have been deleted.
        :rtype: boolean
        """
        return all(self.delete(key) for key in keys)

    def has(self, key):
        """Checks if a key exists in the cache without returning it. This is a
        cheap operation that bypasses loading the actual data on the backend.

        This method is optional and may not be implemented on all caches.

        :param key: the key to check
        """
        raise NotImplementedError(
            "%s doesn't have an efficient implementation of `has`. That "
            "means it is impossible to check whether a key exists without "
            "fully loading the key's data. Consider using `self.get` "
            "explicitly if you don't care about performance."
        )

    def clear(self):
        """Clears the cache.  Keep in mind that not all caches support
        completely clearing the cache.

        :returns: Whether the cache has been cleared.
        :rtype: boolean
        """
        return True

    def inc(self, key, delta=1):
        """Increments the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to increment.
        :param delta: the delta to add.
        :returns: The new value or ``None`` for backend errors.
        """
        value = (self.get(key) or 0) + delta
        return value if self.set(key, value) else None

    def dec(self, key, delta=1):
        """Decrements the value of a key by `delta`.  If the key does
        not yet exist it is initialized with `-delta`.

        For supporting caches this is an atomic operation.

        :param key: the key to decrement.
        :param delta: the delta to subtract.
        :returns: The new value or `None` for backend errors.
        """
        value = (self.get(key) or 0) - delta
        return value if self.set(key, value) else None

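A minimal sketch of what a custom backend looks like on top of this base class; the ``DictCache`` name is invented for illustration, and timeouts are ignored for brevity::

    class DictCache(BaseCache):
        # Illustrative only: a dict-backed cache that ignores timeouts.
        def __init__(self, default_timeout=300):
            BaseCache.__init__(self, default_timeout)
            self._store = {}

        def get(self, key):
            return self._store.get(key)

        def set(self, key, value, timeout=None):
            self._store[key] = value
            return True

        def delete(self, key):
            return self._store.pop(key, None) is not None

    c = DictCache()
    c.inc("counter")  # the inherited inc() composes get() and set()
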
class NullCache(BaseCache):
    """A cache that doesn't cache.  This can be useful for unit testing.

    :param default_timeout: a dummy parameter that is ignored but exists
                            for API compatibility with other caches.
    """

    def has(self, key):
        return False


class SimpleCache(BaseCache):
    """Simple memory cache for single process environments.  This class exists
    mainly for the development server and is not 100% thread safe.  It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.

    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    """

    def __init__(self, threshold=500, default_timeout=300):
        BaseCache.__init__(self, default_timeout)
        self._cache = {}
        self.clear = self._cache.clear
        self._threshold = threshold

    def _prune(self):
        if len(self._cache) > self._threshold:
            now = time()
            toremove = []
            for idx, (key, (expires, _)) in enumerate(self._cache.items()):
                if (expires != 0 and expires <= now) or idx % 3 == 0:
                    toremove.append(key)
            for key in toremove:
                self._cache.pop(key, None)

    def _normalize_timeout(self, timeout):
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = time() + timeout
        return timeout

    def get(self, key):
        try:
            expires, value = self._cache[key]
            if expires == 0 or expires > time():
                return pickle.loads(value)
        except (KeyError, pickle.PickleError):
            return None

    def set(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        return True

    def add(self, key, value, timeout=None):
        expires = self._normalize_timeout(timeout)
        self._prune()
        item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
        if key in self._cache:
            return False
        self._cache.setdefault(key, item)
        return True

    def delete(self, key):
        return self._cache.pop(key, None) is not None

    def has(self, key):
        try:
            expires, value = self._cache[key]
            return expires == 0 or expires > time()
        except KeyError:
            return False


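The inherited batch helpers compose the primitives above, so SimpleCache gets them for free; a quick sketch::

    cache = SimpleCache(threshold=100, default_timeout=60)
    cache.set_many({"a": 1, "b": 2})
    assert cache.get_dict("a", "b") == {"a": 1, "b": 2}
    assert cache.get("missing") is None
    assert cache.inc("hits") == 1  # missing keys start at the delta
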
_test_memcached_key = re.compile(r"[^\x00-\x21\xff]{1,250}$").match


class MemcachedCache(BaseCache):
    """A cache that uses memcached as backend.

    The first argument can either be an object that resembles the API of a
    :class:`memcache.Client` or a tuple/list of server addresses. In the
    event that a tuple/list is passed, Werkzeug tries to import the best
    available memcache library.

    This cache looks into the following packages/modules to find bindings for
    memcached:

        - ``pylibmc``
        - ``google.appengine.api.memcached``
        - ``memcached``
        - ``libmc``

    Implementation notes:  This cache backend works around some limitations in
    memcached to simplify the interface.  For example unicode keys are encoded
    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
    the keys in the same format as passed.  Furthermore all get methods
    silently ignore key errors to not cause problems when untrusted user data
    is passed to the get methods, which is often the case in web applications.

    :param servers: a list or tuple of server addresses or alternatively
                    a :class:`memcache.Client` or a compatible client.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: a prefix that is added before all keys.  This makes it
                       possible to use the same memcached server for different
                       applications.  Keep in mind that
                       :meth:`~BaseCache.clear` will also clear keys with a
                       different prefix.
    """

    def __init__(self, servers=None, default_timeout=300, key_prefix=None):
        BaseCache.__init__(self, default_timeout)
        if servers is None or isinstance(servers, (list, tuple)):
            if servers is None:
                servers = ["127.0.0.1:11211"]
            self._client = self.import_preferred_memcache_lib(servers)
            if self._client is None:
                raise RuntimeError("no memcache module found")
        else:
            # NOTE: servers is actually an already initialized memcache
            # client.
            self._client = servers

        self.key_prefix = to_native(key_prefix)

    def _normalize_key(self, key):
        key = to_native(key, "utf-8")
        if self.key_prefix:
            key = self.key_prefix + key
        return key

    def _normalize_timeout(self, timeout):
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout > 0:
            timeout = int(time()) + timeout
        return timeout

    def get(self, key):
        key = self._normalize_key(key)
        # memcached doesn't support keys longer than 250 characters. Such
        # long keys often come from untrusted user-submitted data, so we
        # fail silently on get instead of raising.
        if _test_memcached_key(key):
            return self._client.get(key)

    def get_dict(self, *keys):
        key_mapping = {}
        have_encoded_keys = False
        for key in keys:
            encoded_key = self._normalize_key(key)
            if not isinstance(key, str):
                have_encoded_keys = True
            if _test_memcached_key(key):
                key_mapping[encoded_key] = key
        _keys = list(key_mapping)
        d = rv = self._client.get_multi(_keys)
        if have_encoded_keys or self.key_prefix:
            rv = {}
            for key, value in iteritems(d):
                rv[key_mapping[key]] = value
        if len(rv) < len(keys):
            for key in keys:
                if key not in rv:
                    rv[key] = None
        return rv

    def add(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.add(key, value, timeout)

    def set(self, key, value, timeout=None):
        key = self._normalize_key(key)
        timeout = self._normalize_timeout(timeout)
        return self._client.set(key, value, timeout)

    def get_many(self, *keys):
        d = self.get_dict(*keys)
        return [d[key] for key in keys]

    def set_many(self, mapping, timeout=None):
        new_mapping = {}
        for key, value in _items(mapping):
            key = self._normalize_key(key)
            new_mapping[key] = value

        timeout = self._normalize_timeout(timeout)
        failed_keys = self._client.set_multi(new_mapping, timeout)
        return not failed_keys

    def delete(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.delete(key)

    def delete_many(self, *keys):
        new_keys = []
        for key in keys:
            key = self._normalize_key(key)
            if _test_memcached_key(key):
                new_keys.append(key)
        return self._client.delete_multi(new_keys)

    def has(self, key):
        key = self._normalize_key(key)
        if _test_memcached_key(key):
            return self._client.append(key, "")
        return False

    def clear(self):
        return self._client.flush_all()

    def inc(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.incr(key, delta)

    def dec(self, key, delta=1):
        key = self._normalize_key(key)
        return self._client.decr(key, delta)

    def import_preferred_memcache_lib(self, servers):
        """Returns an initialized memcache client.  Used by the constructor."""
        try:
            import pylibmc
        except ImportError:
            pass
        else:
            return pylibmc.Client(servers)

        try:
            from google.appengine.api import memcache
        except ImportError:
            pass
        else:
            return memcache.Client()

        try:
            import memcache
        except ImportError:
            pass
        else:
            return memcache.Client(servers)

        try:
            import libmc
        except ImportError:
            pass
        else:
            return libmc.Client(servers)


# backwards compatibility
GAEMemcachedCache = MemcachedCache

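A usage sketch, assuming a memcached server on the default 127.0.0.1:11211 and one of the client libraries listed above installed::

    cache = MemcachedCache(key_prefix="myapp/")
    cache.set("user/42", {"name": "jane"}, timeout=300)
    cache.get("user/42")  # keys are transparently prefixed with "myapp/"
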
class RedisCache(BaseCache):
    """Uses the Redis key-value store as a cache backend.

    The first argument can be either a string denoting the address of the
    Redis server or an object resembling an instance of a redis.Redis class.

    Note: the Python Redis API already takes care of encoding unicode strings
    on the fly.

    .. versionadded:: 0.7

    .. versionadded:: 0.8
       `key_prefix` was added.

    .. versionchanged:: 0.8
       This cache backend now properly serializes objects.

    .. versionchanged:: 0.8.3
       This cache backend now supports password authentication.

    .. versionchanged:: 0.10
        ``**kwargs`` is now passed to the redis object.

    :param host: address of the Redis server or an object whose API is
                 compatible with the official Python Redis client (redis-py).
    :param port: port number on which the Redis server listens for connections.
    :param password: password authentication for the Redis server.
    :param db: db (zero-based numeric index) on the Redis server to connect to.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param key_prefix: A prefix that should be added to all keys.

    Any additional keyword arguments will be passed to ``redis.Redis``.
    """

    def __init__(
        self,
        host="localhost",
        port=6379,
        password=None,
        db=0,
        default_timeout=300,
        key_prefix=None,
        **kwargs
    ):
        BaseCache.__init__(self, default_timeout)
        if host is None:
            raise ValueError("RedisCache host parameter may not be None")
        if isinstance(host, string_types):
            try:
                import redis
            except ImportError:
                raise RuntimeError("no redis module found")
            if kwargs.get("decode_responses", None):
                raise ValueError("decode_responses is not supported by RedisCache.")
            self._client = redis.Redis(
                host=host, port=port, password=password, db=db, **kwargs
            )
        else:
            self._client = host
        self.key_prefix = key_prefix or ""

    def _normalize_timeout(self, timeout):
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout == 0:
            timeout = -1
        return timeout

    def dump_object(self, value):
        """Dumps an object into a string for redis.  By default it serializes
        integers as regular string and pickle dumps everything else.
        """
        t = type(value)
        if t in integer_types:
            return str(value).encode("ascii")
        return b"!" + pickle.dumps(value)

    def load_object(self, value):
        """The reversal of :meth:`dump_object`.  This might be called with
        None.
        """
        if value is None:
            return None
        if value.startswith(b"!"):
            try:
                return pickle.loads(value[1:])
            except pickle.PickleError:
                return None
        try:
            return int(value)
        except ValueError:
            # before 0.8 we did not have serialization.  Still support that.
            return value

    def get(self, key):
        return self.load_object(self._client.get(self.key_prefix + key))

    def get_many(self, *keys):
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return [self.load_object(x) for x in self._client.mget(keys)]

    def set(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        if timeout == -1:
            result = self._client.set(name=self.key_prefix + key, value=dump)
        else:
            result = self._client.setex(
                name=self.key_prefix + key, value=dump, time=timeout
            )
        return result

    def add(self, key, value, timeout=None):
        timeout = self._normalize_timeout(timeout)
        dump = self.dump_object(value)
        return self._client.setnx(
            name=self.key_prefix + key, value=dump
        ) and self._client.expire(name=self.key_prefix + key, time=timeout)

    def set_many(self, mapping, timeout=None):
        timeout = self._normalize_timeout(timeout)
        # Use transaction=False to batch without calling redis MULTI
        # which is not supported by twemproxy
        pipe = self._client.pipeline(transaction=False)

        for key, value in _items(mapping):
            dump = self.dump_object(value)
            if timeout == -1:
                pipe.set(name=self.key_prefix + key, value=dump)
            else:
                pipe.setex(name=self.key_prefix + key, value=dump, time=timeout)
        return pipe.execute()

    def delete(self, key):
        return self._client.delete(self.key_prefix + key)

    def delete_many(self, *keys):
        if not keys:
            return
        if self.key_prefix:
            keys = [self.key_prefix + key for key in keys]
        return self._client.delete(*keys)

    def has(self, key):
        return self._client.exists(self.key_prefix + key)

    def clear(self):
        status = False
        if self.key_prefix:
            keys = self._client.keys(self.key_prefix + "*")
            if keys:
                status = self._client.delete(*keys)
        else:
            status = self._client.flushdb()
        return status

    def inc(self, key, delta=1):
        return self._client.incr(name=self.key_prefix + key, amount=delta)

    def dec(self, key, delta=1):
        return self._client.decr(name=self.key_prefix + key, amount=delta)


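A usage sketch, assuming a local Redis server and the redis-py package; extra keyword arguments go straight to ``redis.Redis``::

    cache = RedisCache(host="localhost", port=6379, key_prefix="myapp:")
    cache.set("answer", 42)       # stored as the raw string b"42"
    cache.set("user", {"id": 1})  # pickled, stored with a b"!" marker
    assert cache.get("answer") == 42
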
class FileSystemCache(BaseCache):
    """A cache that stores the items on the file system.  This cache depends
    on being the only user of the `cache_dir`.  Make absolutely sure that
    nobody but this cache stores files there or otherwise the cache will
    randomly delete files therein.

    :param cache_dir: the directory where cache files are stored.
    :param threshold: the maximum number of items the cache stores before
                      it starts deleting some. A threshold value of 0
                      indicates no threshold.
    :param default_timeout: the default timeout that is used if no timeout is
                            specified on :meth:`~BaseCache.set`. A timeout of
                            0 indicates that the cache never expires.
    :param mode: the file mode wanted for the cache files, default 0600
    """

    #: used for temporary files by the FileSystemCache
    _fs_transaction_suffix = ".__wz_cache"
    #: keep amount of files in a cache element
    _fs_count_file = "__wz_cache_count"

    def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600):
        BaseCache.__init__(self, default_timeout)
        self._path = cache_dir
        self._threshold = threshold
        self._mode = mode

        try:
            os.makedirs(self._path)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

        self._update_count(value=len(self._list_dir()))

    @property
    def _file_count(self):
        return self.get(self._fs_count_file) or 0

    def _update_count(self, delta=None, value=None):
        # If we have no threshold, don't count files
        if self._threshold == 0:
            return

        if delta:
            new_count = self._file_count + delta
        else:
            new_count = value or 0
        self.set(self._fs_count_file, new_count, mgmt_element=True)

    def _normalize_timeout(self, timeout):
        timeout = BaseCache._normalize_timeout(self, timeout)
        if timeout != 0:
            timeout = time() + timeout
        return int(timeout)

    def _list_dir(self):
        """return a list of (fully qualified) cache filenames
        """
        mgmt_files = [
            self._get_filename(name).split("/")[-1] for name in (self._fs_count_file,)
        ]
        return [
            os.path.join(self._path, fn)
            for fn in os.listdir(self._path)
            if not fn.endswith(self._fs_transaction_suffix) and fn not in mgmt_files
        ]

    def _prune(self):
        if self._threshold == 0 or not self._file_count > self._threshold:
            return

        entries = self._list_dir()
        now = time()
        for idx, fname in enumerate(entries):
            try:
                remove = False
                with open(fname, "rb") as f:
                    expires = pickle.load(f)
                remove = (expires != 0 and expires <= now) or idx % 3 == 0

                if remove:
                    os.remove(fname)
            except (IOError, OSError):
                pass
        self._update_count(value=len(self._list_dir()))

    def clear(self):
        for fname in self._list_dir():
            try:
                os.remove(fname)
            except (IOError, OSError):
                self._update_count(value=len(self._list_dir()))
                return False
        self._update_count(value=0)
        return True

    def _get_filename(self, key):
        if isinstance(key, text_type):
            key = key.encode("utf-8")  # XXX unicode review
        hash = md5(key).hexdigest()
        return os.path.join(self._path, hash)

    def get(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, "rb") as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return pickle.load(f)
                else:
                    os.remove(filename)
                    return None
        except (IOError, OSError, pickle.PickleError):
            return None

    def add(self, key, value, timeout=None):
        filename = self._get_filename(key)
        if not os.path.exists(filename):
            return self.set(key, value, timeout)
        return False

    def set(self, key, value, timeout=None, mgmt_element=False):
        # Management elements have no timeout
        if mgmt_element:
            timeout = 0

        # Don't prune on management element update, to avoid loop
        else:
            self._prune()

        timeout = self._normalize_timeout(timeout)
        filename = self._get_filename(key)
        try:
            fd, tmp = tempfile.mkstemp(
                suffix=self._fs_transaction_suffix, dir=self._path
            )
            with os.fdopen(fd, "wb") as f:
                pickle.dump(timeout, f, 1)
                pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
            rename(tmp, filename)
            os.chmod(filename, self._mode)
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=1)
            return True

    def delete(self, key, mgmt_element=False):
        try:
            os.remove(self._get_filename(key))
        except (IOError, OSError):
            return False
        else:
            # Management elements should not count towards threshold
            if not mgmt_element:
                self._update_count(delta=-1)
            return True

    def has(self, key):
        filename = self._get_filename(key)
        try:
            with open(filename, "rb") as f:
                pickle_time = pickle.load(f)
                if pickle_time == 0 or pickle_time >= time():
                    return True
                else:
                    os.remove(filename)
                    return False
        except (IOError, OSError, pickle.PickleError):
            return False


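A usage sketch; per the warning in the docstring, the directory must be dedicated to the cache::

    import tempfile

    cache = FileSystemCache(tempfile.mkdtemp(), threshold=100)
    cache.set("page/index", "<html>...</html>", timeout=60)
    assert cache.has("page/index")
    cache.clear()
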
class UWSGICache(BaseCache):
    """Implements the cache using uWSGI's caching framework.

    .. note::
        This class cannot be used when running under PyPy, because the uWSGI
        API implementation for PyPy is lacking the needed functionality.

    :param default_timeout: The default timeout in seconds.
    :param cache: The name of the caching instance to connect to, for
        example: mycache@localhost:3031, defaults to an empty string, which
        means uWSGI will cache in the local instance. If the cache is in the
        same instance as the werkzeug app, you only have to provide the name of
        the cache.
    """

    def __init__(self, default_timeout=300, cache=""):
        BaseCache.__init__(self, default_timeout)

        if platform.python_implementation() == "PyPy":
            raise RuntimeError(
                "uWSGI caching does not work under PyPy, see "
                "the docs for more details."
            )

        try:
            import uwsgi

            self._uwsgi = uwsgi
        except ImportError:
            raise RuntimeError(
                "uWSGI could not be imported, are you running under uWSGI?"
            )

        self.cache = cache

    def get(self, key):
        rv = self._uwsgi.cache_get(key, self.cache)
        if rv is None:
            return
        return pickle.loads(rv)

    def delete(self, key):
        return self._uwsgi.cache_del(key, self.cache)

    def set(self, key, value, timeout=None):
        return self._uwsgi.cache_update(
            key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
        )

    def add(self, key, value, timeout=None):
        return self._uwsgi.cache_set(
            key, pickle.dumps(value), self._normalize_timeout(timeout), self.cache
        )

    def clear(self):
        return self._uwsgi.cache_clear(self.cache)

    def has(self, key):
        return self._uwsgi.cache_exists(key, self.cache) is not None
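Because every backend shares the BaseCache API, callers can pick one at startup and treat it uniformly; a sketch, with the config keys invented for illustration::

    def make_cache(config):
        # "CACHE_BACKEND" etc. are hypothetical config keys, not Werkzeug names.
        backend = config.get("CACHE_BACKEND", "simple")
        if backend == "redis":
            return RedisCache(host=config.get("REDIS_HOST", "localhost"))
        if backend == "filesystem":
            return FileSystemCache(config["CACHE_DIR"])
        if backend == "null":
            return NullCache()
        return SimpleCache()
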
262
python/werkzeug/contrib/fixers.py
Normal file
@@ -0,0 +1,262 @@
"""
|
||||
Fixers
|
||||
======
|
||||
|
||||
.. warning::
|
||||
.. deprecated:: 0.15
|
||||
``ProxyFix`` has moved to :mod:`werkzeug.middleware.proxy_fix`.
|
||||
All other code in this module is deprecated and will be removed
|
||||
in version 1.0.
|
||||
|
||||
.. versionadded:: 0.5
|
||||
|
||||
This module includes various helpers that fix web server behavior.
|
||||
|
||||
.. autoclass:: ProxyFix
|
||||
:members:
|
||||
|
||||
.. autoclass:: CGIRootFix
|
||||
|
||||
.. autoclass:: PathInfoFromRequestUriFix
|
||||
|
||||
.. autoclass:: HeaderRewriterFix
|
||||
|
||||
.. autoclass:: InternetExplorerFix
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
import warnings
|
||||
|
||||
from ..datastructures import Headers
|
||||
from ..datastructures import ResponseCacheControl
|
||||
from ..http import parse_cache_control_header
|
||||
from ..http import parse_options_header
|
||||
from ..http import parse_set_header
|
||||
from ..middleware.proxy_fix import ProxyFix as _ProxyFix
|
||||
from ..useragents import UserAgent
|
||||
|
||||
try:
|
||||
from urllib.parse import unquote
|
||||
except ImportError:
|
||||
from urllib import unquote
|
||||
|
||||
|
||||
class CGIRootFix(object):
|
||||
"""Wrap the application in this middleware if you are using FastCGI
|
||||
or CGI and you have problems with your app root being set to the CGI
|
||||
script's path instead of the path users are going to visit.
|
||||
|
||||
:param app: the WSGI application
|
||||
:param app_root: Defaulting to ``'/'``, you can set this to
|
||||
something else if your app is mounted somewhere else.
|
||||
|
||||
.. deprecated:: 0.15
|
||||
This middleware will be removed in version 1.0.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
Added `app_root` parameter and renamed from
|
||||
``LighttpdCGIRootFix``.
|
||||
"""
|
||||
|
||||
def __init__(self, app, app_root="/"):
|
||||
warnings.warn(
|
||||
"'CGIRootFix' is deprecated as of version 0.15 and will be"
|
||||
" removed in version 1.0.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
self.app = app
|
||||
self.app_root = app_root.strip("/")
|
||||
|
||||
def __call__(self, environ, start_response):
|
||||
environ["SCRIPT_NAME"] = self.app_root
|
||||
return self.app(environ, start_response)
|
||||
|
||||
|
||||
class LighttpdCGIRootFix(CGIRootFix):
|
||||
def __init__(self, *args, **kwargs):
|
||||
warnings.warn(
|
||||
"'LighttpdCGIRootFix' is renamed 'CGIRootFix'. Both will be"
|
||||
" removed in version 1.0.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
super(LighttpdCGIRootFix, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
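Wiring the fixer around a WSGI app is one line; a sketch (the inner ``app`` is any WSGI callable)::

    def app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"Hello from /myapp\n"]

    app = CGIRootFix(app, app_root="/myapp")
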
class PathInfoFromRequestUriFix(object):
    """On Windows, environment variables are limited to the system charset,
    which makes it impossible to store the `PATH_INFO` variable in the
    environment without loss of information on some systems.

    This is for example a problem for CGI scripts on a Windows Apache.

    This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
    `REQUEST_URL`, or `UNENCODED_URL` (whatever is available).  Thus the
    fix can only be applied if the webserver supports either of these
    variables.

    :param app: the WSGI application

    .. deprecated:: 0.15
        This middleware will be removed in version 1.0.
    """

    def __init__(self, app):
        warnings.warn(
            "'PathInfoFromRequestUriFix' is deprecated as of version"
            " 0.15 and will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.app = app

    def __call__(self, environ, start_response):
        for key in "REQUEST_URL", "REQUEST_URI", "UNENCODED_URL":
            if key not in environ:
                continue
            request_uri = unquote(environ[key])
            script_name = unquote(environ.get("SCRIPT_NAME", ""))
            if request_uri.startswith(script_name):
                environ["PATH_INFO"] = request_uri[len(script_name) :].split("?", 1)[0]
                break
        return self.app(environ, start_response)


class ProxyFix(_ProxyFix):
    """
    .. deprecated:: 0.15
        ``werkzeug.contrib.fixers.ProxyFix`` has moved to
        :mod:`werkzeug.middleware.proxy_fix`. This import will be
        removed in 1.0.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.contrib.fixers.ProxyFix' has moved to 'werkzeug"
            ".middleware.proxy_fix.ProxyFix'. This import is deprecated"
            " as of version 0.15 and will be removed in 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(ProxyFix, self).__init__(*args, **kwargs)


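New code should use the middleware location directly instead of this shim; the 0.15 middleware takes per-header trust counts::

    from werkzeug.middleware.proxy_fix import ProxyFix

    # trust one proxy for X-Forwarded-For and X-Forwarded-Proto
    app = ProxyFix(app, x_for=1, x_proto=1)
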
class HeaderRewriterFix(object):
    """This middleware can remove response headers and add others.  This is
    useful, for example, to remove the `Date` header from responses if you
    are using a server that adds that header no matter whether it's already
    present, or to add `X-Powered-By` headers::

        app = HeaderRewriterFix(app, remove_headers=['Date'],
                                add_headers=[('X-Powered-By', 'WSGI')])

    :param app: the WSGI application
    :param remove_headers: a sequence of header keys that should be
                           removed.
    :param add_headers: a sequence of ``(key, value)`` tuples that should
                        be added.

    .. deprecated:: 0.15
        This middleware will be removed in 1.0.
    """

    def __init__(self, app, remove_headers=None, add_headers=None):
        warnings.warn(
            "'HeaderRewriterFix' is deprecated as of version 0.15 and"
            " will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.app = app
        self.remove_headers = set(x.lower() for x in (remove_headers or ()))
        self.add_headers = list(add_headers or ())

    def __call__(self, environ, start_response):
        def rewriting_start_response(status, headers, exc_info=None):
            new_headers = []
            for key, value in headers:
                if key.lower() not in self.remove_headers:
                    new_headers.append((key, value))
            new_headers += self.add_headers
            return start_response(status, new_headers, exc_info)

        return self.app(environ, rewriting_start_response)


class InternetExplorerFix(object):
    """This middleware fixes a couple of bugs with Microsoft Internet
    Explorer.  Currently the following fixes are applied:

    -   removes `Vary` headers for unsupported mimetypes, which
        cause trouble with caching.  Can be disabled by passing
        ``fix_vary=False`` to the constructor.
        see: https://support.microsoft.com/en-us/help/824847

    -   removes offending headers to work around caching bugs in
        Internet Explorer if `Content-Disposition` is set.  Can be
        disabled by passing ``fix_attach=False`` to the constructor.

    If it does not detect an affected Internet Explorer version, it won't
    touch the request / response.

    .. deprecated:: 0.15
        This middleware will be removed in 1.0.
    """

    # This code was inspired by Django fixers for the same bugs.  The
    # fix_vary and fix_attach fixers were originally implemented in Django
    # by Michael Axiak and are available as part of the Django project:
    # https://code.djangoproject.com/ticket/4148

    def __init__(self, app, fix_vary=True, fix_attach=True):
        warnings.warn(
            "'InternetExplorerFix' is deprecated as of version 0.15 and"
            " will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        self.app = app
        self.fix_vary = fix_vary
        self.fix_attach = fix_attach

    def fix_headers(self, environ, headers, status=None):
        if self.fix_vary:
            header = headers.get("content-type", "")
            mimetype, options = parse_options_header(header)
            if mimetype not in ("text/html", "text/plain", "text/sgml"):
                headers.pop("vary", None)

        if self.fix_attach and "content-disposition" in headers:
            pragma = parse_set_header(headers.get("pragma", ""))
            pragma.discard("no-cache")
            header = pragma.to_header()
            if not header:
                headers.pop("pragma", "")
            else:
                headers["Pragma"] = header
            header = headers.get("cache-control", "")
            if header:
                cc = parse_cache_control_header(header, cls=ResponseCacheControl)
                cc.no_cache = None
                cc.no_store = False
                header = cc.to_header()
                if not header:
                    headers.pop("cache-control", "")
                else:
                    headers["Cache-Control"] = header

    def run_fixed(self, environ, start_response):
        def fixing_start_response(status, headers, exc_info=None):
            headers = Headers(headers)
            self.fix_headers(environ, headers, status)
            return start_response(status, headers.to_wsgi_list(), exc_info)

        return self.app(environ, fixing_start_response)

    def __call__(self, environ, start_response):
        ua = UserAgent(environ)
        if ua.browser != "msie":
            return self.app(environ, start_response)
        return self.run_fixed(environ, start_response)
358
python/werkzeug/contrib/iterio.py
Normal file
@@ -0,0 +1,358 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.iterio
    ~~~~~~~~~~~~~~~~~~~~~~~

    This module implements an :class:`IterIO` that converts an iterator into
    a stream object and the other way round.  Converting streams into
    iterators requires the `greenlet`_ module.

    To convert an iterator into a stream all you have to do is to pass it
    directly to the :class:`IterIO` constructor.  In this example we pass it
    a newly created generator::

        def foo():
            yield "something\n"
            yield "otherthings"
        stream = IterIO(foo())
        print stream.read()         # read the whole iterator

    The other way round works a bit differently because we have to ensure that
    the code execution doesn't take place yet.  An :class:`IterIO` call with a
    callable as first argument does two things.  The function itself is passed
    an :class:`IterIO` stream it can feed.  The object returned by the
    :class:`IterIO` constructor on the other hand is not a stream object but
    an iterator::

        def foo(stream):
            stream.write("some")
            stream.write("thing")
            stream.flush()
            stream.write("otherthing")
        iterator = IterIO(foo)
        print iterator.next()       # prints something
        print iterator.next()       # prints otherthing
        iterator.next()             # raises StopIteration

    .. _greenlet: https://github.com/python-greenlet/greenlet

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import warnings

from .._compat import implements_iterator

try:
    import greenlet
except ImportError:
    greenlet = None

warnings.warn(
    "'werkzeug.contrib.iterio' is deprecated as of version 0.15 and"
    " will be removed in version 1.0.",
    DeprecationWarning,
    stacklevel=2,
)


def _mixed_join(iterable, sentinel):
    """concatenate any string type in an intelligent way."""
    iterator = iter(iterable)
    first_item = next(iterator, sentinel)
    if isinstance(first_item, bytes):
        return first_item + b"".join(iterator)
    return first_item + u"".join(iterator)


def _newline(reference_string):
    if isinstance(reference_string, bytes):
        return b"\n"
    return u"\n"


@implements_iterator
class IterIO(object):
    """Instances of this object implement an interface compatible with the
    standard Python :class:`file` object.  Streams are either read-only or
    write-only depending on how the object is created.

    If the first argument is an iterable a file like object is returned that
    returns the contents of the iterable.  In case the iterable is empty
    read operations will return the sentinel value.

    If the first argument is a callable then the stream object will be
    created and passed to that function.  The caller itself however will
    not receive a stream but an iterable.  The function will be executed
    step by step as something iterates over the returned iterable.  Each
    call to :meth:`flush` will create an item for the iterable.  If
    :meth:`flush` is called without any writes in-between the sentinel
    value will be yielded.

    Note for Python 3: due to the incompatible interface of bytes and
    streams you should set the sentinel value explicitly to an empty
    bytestring (``b''``) if you are expecting to deal with bytes as
    otherwise the end of the stream is marked with the wrong sentinel
    value.

    .. versionadded:: 0.9
       `sentinel` parameter was added.
    """

    def __new__(cls, obj, sentinel=""):
        try:
            iterator = iter(obj)
        except TypeError:
            return IterI(obj, sentinel)
        return IterO(iterator, sentinel)

    def __iter__(self):
        return self

    def tell(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.pos

    def isatty(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return False

    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def truncate(self, size=None):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def write(self, s):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def writelines(self, list):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def read(self, n=-1):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def readlines(self, sizehint=0):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def readline(self, length=None):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def flush(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        raise IOError(9, "Bad file descriptor")

    def __next__(self):
        if self.closed:
            raise StopIteration()
        line = self.readline()
        if not line:
            raise StopIteration()
        return line


class IterI(IterIO):
    """Convert a stream into an iterator."""

    def __new__(cls, func, sentinel=""):
        if greenlet is None:
            raise RuntimeError("IterI requires greenlet support")
        stream = object.__new__(cls)
        stream._parent = greenlet.getcurrent()
        stream._buffer = []
        stream.closed = False
        stream.sentinel = sentinel
        stream.pos = 0

        def run():
            func(stream)
            stream.close()

        g = greenlet.greenlet(run, stream._parent)
        while 1:
            rv = g.switch()
            if not rv:
                return
            yield rv[0]

    def close(self):
        if not self.closed:
            self.closed = True
            self._flush_impl()

    def write(self, s):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if s:
            self.pos += len(s)
            self._buffer.append(s)

    def writelines(self, list):
        for item in list:
            self.write(item)

    def flush(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self._flush_impl()

    def _flush_impl(self):
        data = _mixed_join(self._buffer, self.sentinel)
        self._buffer = []
        if not data and self.closed:
            self._parent.switch()
        else:
            self._parent.switch((data,))


class IterO(IterIO):
    """Iter output.  Wrap an iterator and give it a stream like interface."""

    def __new__(cls, gen, sentinel=""):
        self = object.__new__(cls)
        self._gen = gen
        self._buf = None
        self.sentinel = sentinel
        self.closed = False
        self.pos = 0
        return self

    def __iter__(self):
        return self

    def _buf_append(self, string):
        """Replace string directly without appending to an empty string,
        avoiding type issues."""
        if not self._buf:
            self._buf = string
        else:
            self._buf += string

    def close(self):
        if not self.closed:
            self.closed = True
            if hasattr(self._gen, "close"):
                self._gen.close()

    def seek(self, pos, mode=0):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if mode == 1:
            pos += self.pos
        elif mode == 2:
            self.read()
            self.pos = min(self.pos, self.pos + pos)
            return
        elif mode != 0:
            raise IOError("Invalid argument")
        buf = []
        try:
            tmp_end_pos = len(self._buf or "")
            while pos > tmp_end_pos:
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))
        self.pos = max(0, pos)

    def read(self, n=-1):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if n < 0:
            self._buf_append(_mixed_join(self._gen, self.sentinel))
            result = self._buf[self.pos :]
            self.pos += len(result)
            return result
        new_pos = self.pos + n
        buf = []
        try:
            tmp_end_pos = 0 if self._buf is None else len(self._buf)
            while new_pos > tmp_end_pos or (self._buf is None and not buf):
                item = next(self._gen)
                tmp_end_pos += len(item)
                buf.append(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        new_pos = max(0, new_pos)
        try:
            return self._buf[self.pos : new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readline(self, length=None):
        if self.closed:
            raise ValueError("I/O operation on closed file")

        nl_pos = -1
        if self._buf:
            nl_pos = self._buf.find(_newline(self._buf), self.pos)
        buf = []
        try:
            if self._buf is None:
                pos = self.pos
            else:
                pos = len(self._buf)
            while nl_pos < 0:
                item = next(self._gen)
                local_pos = item.find(_newline(item))
                buf.append(item)
                if local_pos >= 0:
                    nl_pos = pos + local_pos
                    break
                pos += len(item)
        except StopIteration:
            pass
        if buf:
            self._buf_append(_mixed_join(buf, self.sentinel))

        if self._buf is None:
            return self.sentinel

        if nl_pos < 0:
            new_pos = len(self._buf)
        else:
            new_pos = nl_pos + 1
        if length is not None and self.pos + length < new_pos:
            new_pos = self.pos + length
        try:
            return self._buf[self.pos : new_pos]
        finally:
            self.pos = min(new_pos, len(self._buf))

    def readlines(self, sizehint=0):
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines
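A sketch of the read-only direction, which needs no greenlet: wrapping an iterator yields an IterO with the usual file methods::

    stream = IterIO(iter(["first line\n", "second line\n"]))
    assert stream.readline() == "first line\n"
    assert stream.read() == "second line\n"
    assert stream.read() == ""  # the default sentinel marks the end
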
11
python/werkzeug/contrib/lint.py
Normal file
@@ -0,0 +1,11 @@
import warnings

from ..middleware.lint import *  # noqa: F401, F403

warnings.warn(
    "'werkzeug.contrib.lint' has moved to 'werkzeug.middleware.lint'."
    " This import is deprecated as of version 0.15 and will be removed"
    " in version 1.0.",
    DeprecationWarning,
    stacklevel=2,
)
42
python/werkzeug/contrib/profiler.py
Normal file
@@ -0,0 +1,42 @@
import warnings

from ..middleware.profiler import *  # noqa: F401, F403

warnings.warn(
    "'werkzeug.contrib.profiler' has moved to"
    " 'werkzeug.middleware.profiler'. This import is deprecated as of"
    " version 0.15 and will be removed in version 1.0.",
    DeprecationWarning,
    stacklevel=2,
)


class MergeStream(object):
    """An object that redirects ``write`` calls to multiple streams.
    Use this to log to both ``sys.stdout`` and a file::

        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)

    .. deprecated:: 0.15
        Use the ``tee`` command in your terminal instead. This class
        will be removed in 1.0.
    """

    def __init__(self, *streams):
        warnings.warn(
            "'MergeStream' is deprecated as of version 0.15 and will be removed in"
            " version 1.0. Use your terminal's 'tee' command instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        if not streams:
            raise TypeError("At least one stream must be given.")

        self.streams = streams

    def write(self, data):
        for stream in self.streams:
            stream.write(data)
362
python/werkzeug/contrib/securecookie.py
Normal file
@@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.contrib.securecookie
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    This module implements a cookie that is not alterable from the client
    because it adds a checksum the server checks for.  You can use it as a
    session replacement if all you have is a user id or something to mark
    a logged in user.

    Keep in mind that the data is still readable from the client as a
    normal cookie is.  However, you don't have to store and flush the
    sessions you have on the server.

    Example usage:

    >>> from werkzeug.contrib.securecookie import SecureCookie
    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")

    Dumping into a string so that one can store it in a cookie:

    >>> value = x.serialize()

    Loading from that string again:

    >>> x = SecureCookie.unserialize(value, "deadbeef")
    >>> x["baz"]
    (1, 2, 3)

    If someone modifies the cookie and the checksum is wrong the unserialize
    method will fail silently and return a new empty `SecureCookie` object.

    Keep in mind that the values will be visible in the cookie, so do not
    store data in a cookie you don't want the user to see.

    Application Integration
    =======================

    If you are using the werkzeug request objects you could integrate the
    secure cookie into your application like this::

        from werkzeug.utils import cached_property
        from werkzeug.wrappers import BaseRequest
        from werkzeug.contrib.securecookie import SecureCookie

        # don't use this key but a different one; you could just use
        # os.urandom(20) to get something random
        SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                data = self.cookies.get('session_data')
                if not data:
                    return SecureCookie(secret_key=SECRET_KEY)
                return SecureCookie.unserialize(data, SECRET_KEY)

        def application(environ, start_response):
            request = Request(environ)

            # get a response object here
            response = ...

            if request.client_session.should_save:
                session_data = request.client_session.serialize()
                response.set_cookie('session_data', session_data,
                                    httponly=True)
            return response(environ, start_response)

    A less verbose integration can be achieved by using shorthand methods::

        class Request(BaseRequest):

            @cached_property
            def client_session(self):
                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)

        def application(environ, start_response):
            request = Request(environ)

            # get a response object here
            response = ...

            request.client_session.save_cookie(response)
            return response(environ, start_response)

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import base64
import pickle
import warnings
from hashlib import sha1 as _default_hash
from hmac import new as hmac
from time import time

from .._compat import iteritems
from .._compat import text_type
from .._compat import to_bytes
from .._compat import to_native
from .._internal import _date_to_unix
from ..contrib.sessions import ModificationTrackingDict
from ..security import safe_str_cmp
from ..urls import url_quote_plus
from ..urls import url_unquote_plus

warnings.warn(
    "'werkzeug.contrib.securecookie' is deprecated as of version 0.15"
    " and will be removed in version 1.0. It has moved to"
    " https://github.com/pallets/secure-cookie.",
    DeprecationWarning,
    stacklevel=2,
)


class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting."""


class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie.  You can subclass this class and provide
    an alternative mac method.  The important thing is that the mac method
    is a function with an interface similar to hashlib's; required
    methods are update() and digest().

    Example usage:

    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True

    :param data: the initial data.  Either a dict, list of tuples or `None`.
    :param secret_key: the secret key.  If `None` or not specified,
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use.  This has to be a module with a new function
    #: or a function that creates a hashlib object.  Such as `hashlib.md5`.
    #: Subclasses can override this attribute.  The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: The module used for serialization. Should have a ``dumps`` and a
    #: ``loads`` method that takes bytes. The default is :mod:`pickle`.
    #:
    #: .. versionchanged:: 0.15
    #:     The default of ``pickle`` will change to :mod:`json` in 1.0.
    serialization_method = pickle

    #: if the contents should be base64 quoted.  This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = to_bytes(secret_key, "utf-8")
        self.secret_key = secret_key
        self.new = new

        if self.serialization_method is pickle:
            warnings.warn(
                "The default 'SecureCookie.serialization_method' will"
                " change from pickle to json in version 1.0. To upgrade
|
||||
" existing tokens, override 'unquote' to try pickle if"
|
||||
" json fails.",
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s %s%s>" % (
|
||||
self.__class__.__name__,
|
||||
dict.__repr__(self),
|
||||
"*" if self.should_save else "",
|
||||
)
|
||||
|
||||
@property
|
||||
def should_save(self):
|
||||
"""True if the session should be saved. By default this is only true
|
||||
for :attr:`modified` cookies, not :attr:`new`.
|
||||
"""
|
||||
return self.modified
|
||||
|
||||
@classmethod
|
||||
def quote(cls, value):
|
||||
"""Quote the value for the cookie. This can be any object supported
|
||||
by :attr:`serialization_method`.
|
||||
|
||||
:param value: the value to quote.
|
||||
"""
|
||||
if cls.serialization_method is not None:
|
||||
value = cls.serialization_method.dumps(value)
|
||||
if cls.quote_base64:
|
||||
value = b"".join(
|
||||
base64.b64encode(to_bytes(value, "utf8")).splitlines()
|
||||
).strip()
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def unquote(cls, value):
|
||||
"""Unquote the value for the cookie. If unquoting does not work a
|
||||
:exc:`UnquoteError` is raised.
|
||||
|
||||
:param value: the value to unquote.
|
||||
"""
|
||||
try:
|
||||
if cls.quote_base64:
|
||||
value = base64.b64decode(value)
|
||||
if cls.serialization_method is not None:
|
||||
value = cls.serialization_method.loads(value)
|
||||
return value
|
||||
except Exception:
|
||||
# unfortunately pickle and other serialization modules can
|
||||
# cause pretty every error here. if we get one we catch it
|
||||
# and convert it into an UnquoteError
|
||||
raise UnquoteError()
|
||||
|
||||
def serialize(self, expires=None):
|
||||
"""Serialize the secure cookie into a string.
|
||||
|
||||
If expires is provided, the session will be automatically invalidated
|
||||
after expiration when you unseralize it. This provides better
|
||||
protection against session cookie theft.
|
||||
|
||||
:param expires: an optional expiration date for the cookie (a
|
||||
:class:`datetime.datetime` object)
|
||||
"""
|
||||
if self.secret_key is None:
|
||||
raise RuntimeError("no secret key defined")
|
||||
if expires:
|
||||
self["_expires"] = _date_to_unix(expires)
|
||||
result = []
|
||||
mac = hmac(self.secret_key, None, self.hash_method)
|
||||
for key, value in sorted(self.items()):
|
||||
result.append(
|
||||
(
|
||||
"%s=%s" % (url_quote_plus(key), self.quote(value).decode("ascii"))
|
||||
).encode("ascii")
|
||||
)
|
||||
mac.update(b"|" + result[-1])
|
||||
return b"?".join([base64.b64encode(mac.digest()).strip(), b"&".join(result)])
|
||||
|
||||
@classmethod
|
||||
def unserialize(cls, string, secret_key):
|
||||
"""Load the secure cookie from a serialized string.
|
||||
|
||||
:param string: the cookie value to unserialize.
|
||||
:param secret_key: the secret key used to serialize the cookie.
|
||||
:return: a new :class:`SecureCookie`.
|
||||
"""
|
||||
if isinstance(string, text_type):
|
||||
string = string.encode("utf-8", "replace")
|
||||
if isinstance(secret_key, text_type):
|
||||
secret_key = secret_key.encode("utf-8", "replace")
|
||||
try:
|
||||
base64_hash, data = string.split(b"?", 1)
|
||||
except (ValueError, IndexError):
|
||||
items = ()
|
||||
else:
|
||||
items = {}
|
||||
mac = hmac(secret_key, None, cls.hash_method)
|
||||
for item in data.split(b"&"):
|
||||
mac.update(b"|" + item)
|
||||
if b"=" not in item:
|
||||
items = None
|
||||
break
|
||||
key, value = item.split(b"=", 1)
|
||||
# try to make the key a string
|
||||
key = url_unquote_plus(key.decode("ascii"))
|
||||
try:
|
||||
key = to_native(key)
|
||||
except UnicodeError:
|
||||
pass
|
||||
items[key] = value
|
||||
|
||||
# no parsing error and the mac looks okay, we can now
|
||||
# sercurely unpickle our cookie.
|
||||
try:
|
||||
client_hash = base64.b64decode(base64_hash)
|
||||
except TypeError:
|
||||
items = client_hash = None
|
||||
if items is not None and safe_str_cmp(client_hash, mac.digest()):
|
||||
try:
|
||||
for key, value in iteritems(items):
|
||||
items[key] = cls.unquote(value)
|
||||
except UnquoteError:
|
||||
items = ()
|
||||
else:
|
||||
if "_expires" in items:
|
||||
if time() > items["_expires"]:
|
||||
items = ()
|
||||
else:
|
||||
del items["_expires"]
|
||||
else:
|
||||
items = ()
|
||||
return cls(items, secret_key, False)
|
||||
|
||||
@classmethod
|
||||
def load_cookie(cls, request, key="session", secret_key=None):
|
||||
"""Loads a :class:`SecureCookie` from a cookie in request. If the
|
||||
cookie is not set, a new :class:`SecureCookie` instanced is
|
||||
returned.
|
||||
|
||||
:param request: a request object that has a `cookies` attribute
|
||||
which is a dict of all cookie values.
|
||||
:param key: the name of the cookie.
|
||||
:param secret_key: the secret key used to unquote the cookie.
|
||||
Always provide the value even though it has
|
||||
no default!
|
||||
"""
|
||||
data = request.cookies.get(key)
|
||||
if not data:
|
||||
return cls(secret_key=secret_key)
|
||||
return cls.unserialize(data, secret_key)
|
||||
|
||||
def save_cookie(
|
||||
self,
|
||||
response,
|
||||
key="session",
|
||||
expires=None,
|
||||
session_expires=None,
|
||||
max_age=None,
|
||||
path="/",
|
||||
domain=None,
|
||||
secure=None,
|
||||
httponly=False,
|
||||
force=False,
|
||||
):
|
||||
"""Saves the SecureCookie in a cookie on response object. All
|
||||
parameters that are not described here are forwarded directly
|
||||
to :meth:`~BaseResponse.set_cookie`.
|
||||
|
||||
:param response: a response object that has a
|
||||
:meth:`~BaseResponse.set_cookie` method.
|
||||
:param key: the name of the cookie.
|
||||
:param session_expires: the expiration date of the secure cookie
|
||||
stored information. If this is not provided
|
||||
the cookie `expires` date is used instead.
|
||||
"""
|
||||
if force or self.should_save:
|
||||
data = self.serialize(session_expires or expires)
|
||||
response.set_cookie(
|
||||
key,
|
||||
data,
|
||||
expires=expires,
|
||||
max_age=max_age,
|
||||
path=path,
|
||||
domain=domain,
|
||||
secure=secure,
|
||||
httponly=httponly,
|
||||
)
|
||||
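
# Illustrative sketch (not part of the original file): the class attributes
# above make the hash and the serializer pluggable. A subclass switching to
# the json/sha256 combination that the 0.15 deprecation notes point towards
# could look like this; the names below are made up for the example.
import hashlib
import json


class _JSONSerializer(object):
    """Adapter giving json the bytes-based dumps/loads interface that
    SecureCookie expects from its serialization_method."""

    @staticmethod
    def dumps(value):
        return json.dumps(value).encode("utf-8")

    @staticmethod
    def loads(value):
        return json.loads(value.decode("utf-8"))


class JSONSecureCookie(SecureCookie):
    # sha256 instead of the default sha1; wrapped in staticmethod() as the
    # hash_method docs above require for plain functions.
    hash_method = staticmethod(hashlib.sha256)
    serialization_method = _JSONSerializer


def _json_cookie_example():
    # round trip: serialize under a secret key, then load it back
    cookie = JSONSecureCookie({"user_id": 42}, b"change-this-key")
    value = cookie.serialize()
    return JSONSecureCookie.unserialize(value, b"change-this-key")["user_id"]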
389
python/werkzeug/contrib/sessions.py
Normal file
@@ -0,0 +1,389 @@
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~

This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.


Application Integration
=======================

::

    from werkzeug.contrib.sessions import SessionMiddleware, \
         FilesystemSessionStore

    app = SessionMiddleware(app, FilesystemSessionStore())

The current session will then appear in the WSGI environment as
`werkzeug.session`. However, it's recommended not to use the middleware
but the stores directly in the application; only for very simple
scripts is a session middleware likely to be sufficient.

This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and is storage specific. For
example, to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database the new()
method should add an expiration timestamp for the session.

For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::

    session_store = FilesystemSessionStore()

    def application(environ, start_response):
        request = Request(environ)
        sid = request.cookies.get('cookie_name')
        if sid is None:
            request.session = session_store.new()
        else:
            request.session = session_store.get(sid)
        response = get_the_response_object(request)
        if request.session.should_save:
            session_store.save(request.session)
            response.set_cookie('cookie_name', request.session.sid)
        return response(environ, start_response)

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import os
import re
import tempfile
import warnings
from hashlib import sha1
from os import path
from pickle import dump
from pickle import HIGHEST_PROTOCOL
from pickle import load
from random import random
from time import time

from .._compat import PY2
from .._compat import text_type
from ..datastructures import CallbackDict
from ..filesystem import get_filesystem_encoding
from ..posixemulation import rename
from ..utils import dump_cookie
from ..utils import parse_cookie
from ..wsgi import ClosingIterator

warnings.warn(
    "'werkzeug.contrib.sessions' is deprecated as of version 0.15 and"
    " will be removed in version 1.0. It has moved to"
    " https://github.com/pallets/secure-cookie.",
    DeprecationWarning,
    stacklevel=2,
)

_sha1_re = re.compile(r"^[a-f0-9]{40}$")


def _urandom():
    if hasattr(os, "urandom"):
        return os.urandom(30)
    return text_type(random()).encode("ascii")


def generate_key(salt=None):
    # if no salt is passed, repr(None) (the literal string "None") is used
    # as the salt; the actual entropy comes from the current time and
    # _urandom() above. A non-None salt must be a bytestring.
    if salt is None:
        salt = repr(salt).encode("ascii")
    return sha1(b"".join([salt, str(time()).encode("ascii"), _urandom()])).hexdigest()


class ModificationTrackingDict(CallbackDict):
    __slots__ = ("modified",)

    def __init__(self, *args, **kwargs):
        def on_update(self):
            self.modified = True

        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        dict.update(self, *args, **kwargs)

    def copy(self):
        """Create a flat copy of the dict."""
        missing = object()
        result = object.__new__(self.__class__)
        for name in self.__slots__:
            val = getattr(self, name, missing)
            if val is not missing:
                setattr(result, name, val)
        return result

    def __copy__(self):
        return self.copy()


class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes. Changes
    in mutable structures are not tracked, for those you have to set
    `modified` to `True` by hand.
    """

    __slots__ = ModificationTrackingDict.__slots__ + ("sid", "new")

    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new

    def __repr__(self):
        return "<%s %s%s>" % (
            self.__class__.__name__,
            dict.__repr__(self),
            "*" if self.should_save else "",
        )

    @property
    def should_save(self):
        """True if the session should be saved.

        .. versionchanged:: 0.6
           By default the session is now only saved if the session is
           modified, not if it is new like it was before.
        """
        return self.modified


class SessionStore(object):
    """Baseclass for all session stores. The Werkzeug contrib module does
    not implement any useful stores besides the filesystem store;
    application developers are encouraged to create their own stores.

    :param session_class: The session class to use. Defaults to
                          :class:`Session`.
    """

    def __init__(self, session_class=None):
        if session_class is None:
            session_class = Session
        self.session_class = session_class

    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        return _sha1_re.match(key) is not None

    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)

    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)

    def save(self, session):
        """Save a session."""

    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)

    def delete(self, session):
        """Delete a session."""

    def get(self, sid):
        """Get a session for this sid or a new session object. This method
        has to check if the session key is valid and create a new session if
        that wasn't the case.
        """
        return self.session_class({}, sid, True)
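
# Illustrative sketch (not part of the original file): SessionStore is meant
# to be subclassed, as its docstring says. A minimal in-memory store for a
# single-process server could look like this; the class name is made up.
class MemorySessionStore(SessionStore):
    def __init__(self, session_class=None):
        SessionStore.__init__(self, session_class)
        self._store = {}

    def save(self, session):
        # keep a plain dict copy so later mutations don't alias the store
        self._store[session.sid] = dict(session)

    def delete(self, session):
        self._store.pop(session.sid, None)

    def get(self, sid):
        # fall back to a fresh session for unknown or malformed sids
        if not self.is_valid_key(sid) or sid not in self._store:
            return self.new()
        return self.session_class(self._store[sid], sid, False)
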
#: used for temporary files by the filesystem session store
_fs_transaction_suffix = ".__wz_sess"


class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions on the filesystem.
    This store works best on POSIX systems and Windows Vista / Windows
    Server 2008 and newer.

    .. versionchanged:: 0.6
       `renew_missing` was added. Previously this was considered `True`;
       now the default changed to `False` and it can be explicitly
       deactivated.

    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename. ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use. Defaults to
                          :class:`Session`.
    :param renew_missing: set to `True` if you want the store to
                          give the user a new sid if the session was
                          not yet saved.
    """

    def __init__(
        self,
        path=None,
        filename_template="werkzeug_%s.sess",
        session_class=None,
        renew_missing=False,
        mode=0o644,
    ):
        SessionStore.__init__(self, session_class)
        if path is None:
            path = tempfile.gettempdir()
        self.path = path
        if isinstance(filename_template, text_type) and PY2:
            filename_template = filename_template.encode(get_filesystem_encoding())
        assert not filename_template.endswith(_fs_transaction_suffix), (
            "filename templates may not end with %s" % _fs_transaction_suffix
        )
        self.filename_template = filename_template
        self.renew_missing = renew_missing
        self.mode = mode

    def get_session_filename(self, sid):
        # out of the box, this should be a strict ASCII subset but
        # you might reconfigure the session object to have a more
        # arbitrary string.
        if isinstance(sid, text_type) and PY2:
            sid = sid.encode(get_filesystem_encoding())
        return path.join(self.path, self.filename_template % sid)

    def save(self, session):
        # write to a temporary file and rename it into place so that a
        # concurrent reader never sees a partially written session
        fn = self.get_session_filename(session.sid)
        fd, tmp = tempfile.mkstemp(suffix=_fs_transaction_suffix, dir=self.path)
        f = os.fdopen(fd, "wb")
        try:
            dump(dict(session), f, HIGHEST_PROTOCOL)
        finally:
            f.close()
        try:
            rename(tmp, fn)
            os.chmod(fn, self.mode)
        except (IOError, OSError):
            pass

    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            os.unlink(fn)
        except OSError:
            pass

    def get(self, sid):
        if not self.is_valid_key(sid):
            return self.new()
        try:
            f = open(self.get_session_filename(sid), "rb")
        except IOError:
            if self.renew_missing:
                return self.new()
            data = {}
        else:
            try:
                try:
                    data = load(f)
                except Exception:
                    data = {}
            finally:
                f.close()
        return self.session_class(data, sid, False)

    def list(self):
        """Lists all sessions in the store.

        .. versionadded:: 0.6
        """
        before, after = self.filename_template.split("%s", 1)
        filename_re = re.compile(
            r"%s(.{5,})%s$" % (re.escape(before), re.escape(after))
        )
        result = []
        for filename in os.listdir(self.path):
            # this is a session that is still being saved.
            if filename.endswith(_fs_transaction_suffix):
                continue
            match = filename_re.match(filename)
            if match is not None:
                result.append(match.group(1))
        return result


class SessionMiddleware(object):
    """A simple middleware that puts the session object of a store provided
    into the WSGI environ. It automatically sets cookies and restores
    sessions.

    However, a middleware is not the preferred solution because it won't be
    as fast as sessions managed by the application itself, and it puts a key
    into the WSGI environment that is only relevant for the application,
    which is against the concept of WSGI.

    The cookie parameters are the same as for the :func:`~dump_cookie`
    function, just prefixed with ``cookie_``. Additionally `max_age` is
    called `cookie_age` and not `cookie_max_age` because of backwards
    compatibility.
    """

    def __init__(
        self,
        app,
        store,
        cookie_name="session_id",
        cookie_age=None,
        cookie_expires=None,
        cookie_path="/",
        cookie_domain=None,
        cookie_secure=None,
        cookie_httponly=False,
        cookie_samesite="Lax",
        environ_key="werkzeug.session",
    ):
        self.app = app
        self.store = store
        self.cookie_name = cookie_name
        self.cookie_age = cookie_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_httponly = cookie_httponly
        self.cookie_samesite = cookie_samesite
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        cookie = parse_cookie(environ.get("HTTP_COOKIE", ""))
        sid = cookie.get(self.cookie_name, None)
        if sid is None:
            session = self.store.new()
        else:
            session = self.store.get(sid)
        environ[self.environ_key] = session

        def injecting_start_response(status, headers, exc_info=None):
            if session.should_save:
                self.store.save(session)
                headers.append(
                    (
                        "Set-Cookie",
                        dump_cookie(
                            self.cookie_name,
                            session.sid,
                            self.cookie_age,
                            self.cookie_expires,
                            self.cookie_path,
                            self.cookie_domain,
                            self.cookie_secure,
                            self.cookie_httponly,
                            samesite=self.cookie_samesite,
                        ),
                    )
                )
            return start_response(status, headers, exc_info)

        return ClosingIterator(
            self.app(environ, injecting_start_response),
            lambda: self.store.save_if_modified(session),
        )
385
python/werkzeug/contrib/wrappers.py
Normal file
@@ -0,0 +1,385 @@
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~

Extra wrappers or mixins contributed by the community. These wrappers
can be mixed into request objects to add extra functionality.

Example::

    from werkzeug.wrappers import Request as RequestBase
    from werkzeug.contrib.wrappers import JSONRequestMixin

    class Request(RequestBase, JSONRequestMixin):
        pass

Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import warnings

from .._compat import wsgi_decoding_dance
from ..exceptions import BadRequest
from ..http import dump_options_header
from ..http import parse_options_header
from ..utils import cached_property
from ..wrappers.json import JSONMixin as _JSONMixin


def is_known_charset(charset):
    """Checks if the given charset is known to Python."""
    try:
        codecs.lookup(charset)
    except LookupError:
        return False
    return True


class JSONRequestMixin(_JSONMixin):
    """
    .. deprecated:: 0.15
        Moved to :class:`werkzeug.wrappers.json.JSONMixin`. This old
        import will be removed in version 1.0.
    """

    @property
    def json(self):
        warnings.warn(
            "'werkzeug.contrib.wrappers.JSONRequestMixin' has moved to"
            " 'werkzeug.wrappers.json.JSONMixin'. This old import will"
            " be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        return super(JSONRequestMixin, self).json


class ProtobufRequestMixin(object):
    """Add protobuf parsing method to a request object. This will parse the
    input data through `protobuf`_ if possible.

    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the
    content-type is not protobuf or if the data itself cannot be parsed
    properly.

    .. _protobuf: https://github.com/protocolbuffers/protobuf

    .. deprecated:: 0.15
        This mixin will be removed in version 1.0.
    """

    #: by default the :class:`ProtobufRequestMixin` will raise a
    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
    #: initialized. You can bypass that check by setting this
    #: attribute to `False`.
    protobuf_check_initialization = True

    def parse_protobuf(self, proto_type):
        """Parse the data into an instance of proto_type."""
        warnings.warn(
            "'werkzeug.contrib.wrappers.ProtobufRequestMixin' is"
            " deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        if "protobuf" not in self.environ.get("CONTENT_TYPE", ""):
            raise BadRequest("Not a Protobuf request")

        obj = proto_type()
        try:
            obj.ParseFromString(self.data)
        except Exception:
            raise BadRequest("Unable to parse Protobuf request")

        # Fail if not all required fields are set
        if self.protobuf_check_initialization and not obj.IsInitialized():
            raise BadRequest("Partial Protobuf request")

        return obj
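
# Illustrative sketch (not part of the original file): how the mixin above
# is meant to be combined with a request class. ``user_pb2.User`` stands in
# for any protobuf-generated message class.
#
#     from werkzeug.wrappers import Request as RequestBase
#
#     class Request(ProtobufRequestMixin, RequestBase):
#         pass
#
#     def view(request):
#         # raises BadRequest on a wrong content type, on parse errors,
#         # and on missing required fields (protobuf_check_initialization)
#         user = request.parse_protobuf(user_pb2.User)
#         return user.name
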
class RoutingArgsRequestMixin(object):
    """This request mixin adds support for the wsgiorg routing args
    `specification`_.

    .. _specification:
       https://wsgi.readthedocs.io/en/latest/specifications/routing_args.html

    .. deprecated:: 0.15
        This mixin will be removed in version 1.0.
    """

    def _get_routing_args(self):
        warnings.warn(
            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
            " deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        # the default is ((),) so that [0] yields an empty tuple when no
        # routing args have been set
        return self.environ.get("wsgiorg.routing_args", ((),))[0]

    def _set_routing_args(self, value):
        warnings.warn(
            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
            " deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        if self.shallow:
            raise RuntimeError(
                "A shallow request tried to modify the WSGI "
                "environment. If you really want to do that, "
                "set `shallow` to False."
            )
        self.environ["wsgiorg.routing_args"] = (value, self.routing_vars)

    routing_args = property(
        _get_routing_args,
        _set_routing_args,
        doc="""
        The positional URL arguments as `tuple`.""",
    )
    del _get_routing_args, _set_routing_args

    def _get_routing_vars(self):
        warnings.warn(
            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
            " deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        rv = self.environ.get("wsgiorg.routing_args")
        if rv is not None:
            return rv[1]
        rv = {}
        if not self.shallow:
            self.routing_vars = rv
        return rv

    def _set_routing_vars(self, value):
        warnings.warn(
            "'werkzeug.contrib.wrappers.RoutingArgsRequestMixin' is"
            " deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        if self.shallow:
            raise RuntimeError(
                "A shallow request tried to modify the WSGI "
                "environment. If you really want to do that, "
                "set `shallow` to False."
            )
        self.environ["wsgiorg.routing_args"] = (self.routing_args, value)

    routing_vars = property(
        _get_routing_vars,
        _set_routing_vars,
        doc="""
        The keyword URL arguments as `dict`.""",
    )
    del _get_routing_vars, _set_routing_vars


class ReverseSlashBehaviorRequestMixin(object):
    """This mixin reverses the trailing slash behavior of :attr:`script_root`
    and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
    directly on the paths.

    Because it changes the behavior of :class:`Request` this class has to be
    mixed in *before* the actual request class::

        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
            pass

    This example shows the differences (for an application mounted on
    `/application` and the request going to `/application/foo/bar`):

    +---------------+-------------------+---------------------+
    |               | normal behavior   | reverse behavior    |
    +===============+===================+=====================+
    | `script_root` | ``/application``  | ``/application/``   |
    +---------------+-------------------+---------------------+
    | `path`        | ``/foo/bar``      | ``foo/bar``         |
    +---------------+-------------------+---------------------+

    .. deprecated:: 0.15
        This mixin will be removed in version 1.0.
    """

    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will not include a leading slash.
        """
        warnings.warn(
            "'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
            " is deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        path = wsgi_decoding_dance(
            self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
        )
        return path.lstrip("/")

    @cached_property
    def script_root(self):
        """The root path of the script including a trailing slash."""
        warnings.warn(
            "'werkzeug.contrib.wrappers.ReverseSlashBehaviorRequestMixin'"
            " is deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        path = wsgi_decoding_dance(
            self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
        )
        return path.rstrip("/") + "/"


class DynamicCharsetRequestMixin(object):
    """If this mixin is mixed into a request class it will provide
    a dynamic `charset` attribute. This means that if the charset is
    transmitted in the content type headers it's used from there.

    Because it changes the behavior of :class:`Request` this class has
    to be mixed in *before* the actual request class::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            pass

    By default the request object assumes that the URL charset is the
    same as the data charset. If the charset varies on each request
    based on the transmitted data it's not a good idea to let the URLs
    change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
    recommended to set the URL charset to utf-8::

        class MyRequest(DynamicCharsetRequestMixin, Request):
            url_charset = 'utf-8'

    .. deprecated:: 0.15
        This mixin will be removed in version 1.0.

    .. versionadded:: 0.6
    """

    #: the default charset that is assumed if the content type header
    #: is missing or does not contain a charset parameter. The default
    #: is latin1 which is what HTTP specifies as default charset.
    #: You may however want to set this to utf-8 to better support
    #: browsers that do not transmit a charset for incoming data.
    default_charset = "latin1"

    def unknown_charset(self, charset):
        """Called if a charset was provided but is not supported by
        the Python codecs module. By default latin1 is then assumed
        so as not to lose any information; you may override this method
        to change the behavior.

        :param charset: the charset that was not found.
        :return: the replacement charset.
        """
        return "latin1"

    @cached_property
    def charset(self):
        """The charset from the content type."""
        warnings.warn(
            "'werkzeug.contrib.wrappers.DynamicCharsetRequestMixin'"
            " is deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        header = self.environ.get("CONTENT_TYPE")
        if header:
            ct, options = parse_options_header(header)
            charset = options.get("charset")
            if charset:
                if is_known_charset(charset):
                    return charset
                return self.unknown_charset(charset)
        return self.default_charset


class DynamicCharsetResponseMixin(object):
    """If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
    looked up in, and stored in, the `Content-Type` header and updates
    itself automatically. This also means a small performance hit but
    can be useful if you're working with different charsets on
    responses.

    Because the charset attribute is now a property at class-level, the
    default value is stored in `default_charset`.

    Because it changes the behavior of :class:`Response` this class has
    to be mixed in *before* the actual response class::

        class MyResponse(DynamicCharsetResponseMixin, Response):
            pass

    .. deprecated:: 0.15
        This mixin will be removed in version 1.0.

    .. versionadded:: 0.6
    """

    #: the default charset.
    default_charset = "utf-8"

    def _get_charset(self):
        warnings.warn(
            "'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
            " is deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        header = self.headers.get("content-type")
        if header:
            charset = parse_options_header(header)[1].get("charset")
            if charset:
                return charset
        return self.default_charset

    def _set_charset(self, charset):
        warnings.warn(
            "'werkzeug.contrib.wrappers.DynamicCharsetResponseMixin'"
            " is deprecated as of version 0.15 and will be removed in"
            " version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        header = self.headers.get("content-type")
        ct, options = parse_options_header(header)
        if not ct:
            raise TypeError("Cannot set charset if Content-Type header is missing.")
        options["charset"] = charset
        self.headers["Content-Type"] = dump_options_header(ct, options)

    charset = property(
        _get_charset,
        _set_charset,
        doc="""
        The charset for the response. It's stored inside the
        Content-Type header as a parameter.""",
    )
    del _get_charset, _set_charset
2852
python/werkzeug/datastructures.py
Normal file
File diff suppressed because it is too large
774
python/werkzeug/exceptions.py
Normal file
@@ -0,0 +1,774 @@
# -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~

This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.


Usage Example
-------------

::

    from werkzeug.wrappers import BaseRequest
    from werkzeug.wsgi import responder
    from werkzeug.exceptions import HTTPException, NotFound

    def view(request):
        raise NotFound()

    @responder
    def application(environ, start_response):
        request = BaseRequest(environ)
        try:
            return view(request)
        except HTTPException as e:
            return e


As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.

As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.

Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.

If you want to hook in a different exception page for, say, a 404 status
code, you can add a second except for a specific subclass of an error::

    @responder
    def application(environ, start_response):
        request = BaseRequest(environ)
        try:
            return view(request)
        except NotFound:
            return not_found(request)
        except HTTPException as e:
            return e


:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import sys

import werkzeug

# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
werkzeug.exceptions = sys.modules[__name__]

from ._compat import implements_to_string
from ._compat import integer_types
from ._compat import iteritems
from ._compat import text_type
from ._internal import _get_environ
from .wrappers import Response


@implements_to_string
class HTTPException(Exception):
    """Baseclass for all HTTP exceptions. This exception can be called as a
    WSGI application to render a default error page or you can catch the
    subclasses of it independently and render nicer error messages.
    """

    code = None
    description = None

    def __init__(self, description=None, response=None):
        super(Exception, self).__init__()
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """Create an exception that is a subclass of the calling HTTP
        exception and the ``exception`` argument.

        The first argument to the class will be passed to the
        wrapped ``exception``, the rest to the HTTP exception. If
        ``self.args`` is not empty, the wrapped exception message is
        added to the HTTP exception description.

        .. versionchanged:: 0.15
            The description includes the wrapped exception message.
        """

        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                super(cls, self).__init__(*args, **kwargs)

                if arg is None:
                    exception.__init__(self)
                else:
                    exception.__init__(self, arg)

            def get_description(self, environ=None):
                out = super(cls, self).get_description(environ=environ)

                if self.args:
                    out += "<p><pre><code>{}: {}</code></pre></p>".format(
                        exception.__name__, escape(exception.__str__(self))
                    )

                return out

        newcls.__module__ = sys._getframe(1).f_globals.get("__name__")
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        # HTTP_STATUS_CODES (like "escape" used above) is imported at the
        # bottom of this module because of circular dependencies.
        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")

    def get_description(self, environ=None):
        """Get the description."""
        return u"<p>%s</p>" % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type(
            (
                u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
                u"<title>%(code)s %(name)s</title>\n"
                u"<h1>%(name)s</h1>\n"
                u"%(description)s\n"
            )
            % {
                "code": self.code,
                "name": escape(self.name),
                "description": self.get_description(environ),
            }
        )

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [("Content-Type", "text/html")]

    def get_response(self, environ=None):
        """Get a response object. If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request. This
                        can be used to modify the response depending
                        on how the request looked.
        :return: a :class:`Response` object or a subclass thereof.
        """
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as a WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        code = self.code if self.code is not None else "???"
        return "%s %s: %s" % (code, self.name, self.description)

    def __repr__(self):
        code = self.code if self.code is not None else "???"
        return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name)
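
# Illustrative sketch (not part of the original file): ``wrap`` above is
# easiest to see with the concrete pattern Werkzeug itself uses to build
# BadRequestKeyError from the BadRequest class defined just below; the
# "MyKeyError" name here is made up.
#
#     MyKeyError = BadRequest.wrap(KeyError, name="MyKeyError")
#
#     try:
#         {}["missing"]
#     except KeyError:
#         # behaves as a KeyError *and* as a 400 HTTPException, so a
#         # generic "except HTTPException" handler can still render it
#         raise MyKeyError("missing")
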
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """

    code = 400
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )


class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client. Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server. Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """


class SecurityError(BadRequest):
    """Raised if something triggers a security error. This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """


class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """


class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raise if the user is not authorized to access a resource.

    The ``www_authenticate`` argument should be used to set the
    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
    to create correctly formatted values. Strictly speaking a 401
    response is invalid if it doesn't provide at least one value for
    this header, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www_authenticate: A single value, or list of values, for the
        WWW-Authenticate header.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
        its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """

    code = 401
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )

    def __init__(self, description=None, response=None, www_authenticate=None):
        HTTPException.__init__(self, description, response)

        if www_authenticate is not None:
            if not isinstance(www_authenticate, (tuple, list)):
                www_authenticate = (www_authenticate,)

        self.www_authenticate = www_authenticate

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        if self.www_authenticate:
            headers.append(
                ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate]))
            )
        return headers
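
# Illustrative sketch (not part of the original file): raising Unauthorized
# with a formatted challenge, as the docstring above recommends. set_basic()
# is assumed here from werkzeug.datastructures.WWWAuthenticate.
#
#     from werkzeug.datastructures import WWWAuthenticate
#
#     challenge = WWWAuthenticate()
#     challenge.set_basic("login required")
#     raise Unauthorized(www_authenticate=challenge)
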
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """

    code = 403
    description = (
        "You don't have the permission to access the requested"
        " resource. It is either read-protected or not readable by the"
        " server."
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """

    code = 404
    description = (
        "The requested URL was not found on the server. If you entered"
        " the URL manually please check your spelling and try again."
    )


class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise if the request used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.

    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """

    code = 405
    description = "The method is not allowed for the requested URL."

    def __init__(self, valid_methods=None, description=None):
        """Takes an optional list of valid http methods.
        Starting with werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            headers.append(("Allow", ", ".join(self.valid_methods)))
        return headers


class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """

    code = 406
    description = (
        "The resource identified by the request is only capable of"
        " generating response entities which have content"
        " characteristics not acceptable according to the accept"
        " headers sent in the request."
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signal a timeout.
    """

    code = 408
    description = (
        "The server closed the network connection because the browser"
        " didn't finish the request within the specified time."
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """

    code = 409
    description = (
        "A conflict happened while processing the request. The"
        " resource might have been modified while the request was being"
        " processed."
    )


class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """

    code = 410
    description = (
        "The requested URL is no longer available on this server and"
        " there is no forwarding address. If you followed a link from a"
        " foreign page, please contact the author of this page."
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """

    code = 411
    description = (
        "A request with this method requires a valid <code>Content-"
        "Length</code> header."
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """

    code = 412
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """

    code = 413
    description = "The data value transmitted exceeds the capacity limit."


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """

    code = 414
    description = (
        "The length of the requested URL exceeds the capacity limit for"
        " this server. The request cannot be processed."
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """

    code = 415
    description = (
        "The server does not support the media type transmitted in the request."
    )


class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    code = 416
    description = "The server cannot provide the requested range."

    def __init__(self, length=None, units="bytes", description=None):
        """Takes an optional `Content-Range` header value based on the
        ``length`` parameter.
        """
        HTTPException.__init__(self, description)
        self.length = length
        self.units = units

    def get_headers(self, environ=None):
        headers = HTTPException.get_headers(self, environ)
        if self.length is not None:
            headers.append(("Content-Range", "%s */%d" % (self.units, self.length)))
        return headers


class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """

    code = 417
    description = "The server could not meet the requirements of the Expect header."


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """

    code = 418
    description = "This server is a teapot, not a coffee machine."


class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """

    code = 422
    description = (
        "The request was well-formed but was unable to be followed due"
        " to semantic errors."
    )


class Locked(HTTPException):
    """*423* `Locked`

    Used if the resource that is being accessed is locked.
    """

    code = 423
    description = "The resource that is being accessed is locked."


class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Used if the method could not be performed on the resource
    because the requested action depended on another action and that action failed.
    """

    code = 424
    description = (
        "The method could not be performed on the resource because the"
        " requested action depended on another action and that action"
        " failed."
    )


class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or
    "If-Unmodified-Since") with the proper value retained from a recent GET
    request, the server ensures that each client has at least seen the
    previous revision of the resource.
    """

    code = 428
    description = (
        "This request is required to be conditional; try using"
        ' "If-Match" or "If-Unmodified-Since".'
    )


class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates). The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """

    code = 429
    description = "This user has exceeded an allotted request count. Try again later."


class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """

    code = 431
    description = "One or more header fields exceed the maximum size."


class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    This status code indicates that the server is denying access to the
    resource as a consequence of a legal demand.
    """

    code = 451
    description = "Unavailable for legal reasons."


class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.
    """

    code = 500
    description = (
        "The server encountered an internal error and was unable to"
        " complete your request. Either the server is overloaded or"
        " there is an error in the application."
    )


class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """

    code = 501
    description = "The server does not support the action requested by the browser."


class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """

    code = 502
    description = (
        "The proxy server received an invalid response from an upstream server."
    )


class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily unavailable.
    """

    code = 503
    description = (
        "The server is temporarily unable to service your request due"
        " to maintenance downtime or capacity problems. Please try"
        " again later."
    )


class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`

    Status code you should return if a connection to an upstream server
    times out.
    """

    code = 504
    description = "The connection to an upstream server timed out."


class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`

    The server does not support the HTTP protocol version used in the request.
    """

    code = 505
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )


default_exceptions = {}
__all__ = ["HTTPException"]


def _find_exceptions():
    for _name, obj in iteritems(globals()):
        try:
            is_http_exception = issubclass(obj, HTTPException)
        except TypeError:
            is_http_exception = False
        if not is_http_exception or obj.code is None:
            continue
        __all__.append(obj.__name__)
        old_obj = default_exceptions.get(obj.code, None)
        if old_obj is not None and issubclass(obj, old_obj):
            continue
        default_exceptions[obj.code] = obj


_find_exceptions()
del _find_exceptions
class Aborter(object):
    """When passed a dict of code -> exception items it can be used as
    a callable that raises exceptions. If the first argument to the
    callable is an integer it will be looked up in the mapping; if it's
    a WSGI application it will be raised in a proxy exception.

    The rest of the arguments are forwarded to the exception constructor.
    """

    def __init__(self, mapping=None, extra=None):
        if mapping is None:
            mapping = default_exceptions
        self.mapping = dict(mapping)
        if extra is not None:
            self.mapping.update(extra)

    def __call__(self, code, *args, **kwargs):
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError("no exception for %r" % code)
        raise self.mapping[code](*args, **kwargs)


def abort(status, *args, **kwargs):
    """Raises an :py:exc:`HTTPException` for the given status code or WSGI
    application.

    If a status code is given it's looked up in the list of exceptions and
    that exception is raised; if a WSGI application is passed it is wrapped
    in a proxy WSGI exception and that is raised::

        abort(404)  # 404 Not Found
        abort(Response('Hello World'))
    """
    return _aborter(status, *args, **kwargs)


_aborter = Aborter()


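# A short sketch of how ``abort`` and an extended ``Aborter`` are meant to
# be used; the ``PaymentRequired`` subclass below is illustrative, not part
# of this module.
def _example_abort_usage():
    # abort(404) raises NotFound via the module-level Aborter instance.
    try:
        abort(404)
    except NotFound as exc:
        assert exc.code == 404

    # An Aborter can be taught additional codes through ``extra``.
    class PaymentRequired(HTTPException):
        code = 402
        description = "Payment required."

    custom_abort = Aborter(extra={402: PaymentRequired})
    try:
        custom_abort(402)
    except PaymentRequired as exc:
        assert exc.code == 402

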
#: An exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)

# imported here because of circular dependencies of werkzeug.utils
from .http import HTTP_STATUS_CODES
from .utils import escape
64
python/werkzeug/filesystem.py
Normal file
@@ -0,0 +1,64 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.filesystem
    ~~~~~~~~~~~~~~~~~~~

    Various utilities for the local filesystem.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import codecs
import sys
import warnings

# We do not trust traditional unixes.
has_likely_buggy_unicode_filesystem = (
    sys.platform.startswith("linux") or "bsd" in sys.platform
)


def _is_ascii_encoding(encoding):
    """Given an encoding this figures out if the encoding is actually ASCII
    (which is something we don't actually want in most cases). This is
    necessary because ASCII comes under many names such as ANSI_X3.4-1968.
    """
    if encoding is None:
        return False
    try:
        return codecs.lookup(encoding).name == "ascii"
    except LookupError:
        return False


class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
    """The warning used by Werkzeug to signal a broken filesystem. Will only be
    used once per runtime."""


_warned_about_filesystem_encoding = False


def get_filesystem_encoding():
    """Returns the filesystem encoding that should be used. Note that this is
    different from the Python understanding of the filesystem encoding which
    might be deeply flawed. Do not use this value against Python's unicode APIs
    because it might be different. See :ref:`filesystem-encoding` for the exact
    behavior.

    The concept of a filesystem encoding in general is not something you
    should rely on. As such if you ever need to use this function except for
    writing wrapper code reconsider.
    """
    global _warned_about_filesystem_encoding
    rv = sys.getfilesystemencoding()
    if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                "Detected a misconfigured UNIX filesystem: Will use"
                " UTF-8 as filesystem encoding instead of {0!r}".format(rv),
                BrokenFilesystemWarning,
            )
            _warned_about_filesystem_encoding = True
        return "utf-8"
    return rv
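
# A small sketch of the intended use: ask this module (rather than
# ``sys.getfilesystemencoding()`` directly) which encoding to use when
# handing byte paths to the filesystem. The path below is hypothetical.
def _example_filesystem_encoding():
    fs_enc = get_filesystem_encoding()
    # On a misconfigured unix this is "utf-8" instead of ASCII.
    byte_path = u"/tmp/some-file".encode(fs_enc)
    return byte_path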
586
python/werkzeug/formparser.py
Normal file
@@ -0,0 +1,586 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.formparser
    ~~~~~~~~~~~~~~~~~~~

    This module implements the form parsing. It supports url-encoded forms
    as well as non-nested multipart uploads.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import codecs
import re
from functools import update_wrapper
from itertools import chain
from itertools import repeat
from itertools import tee

from ._compat import BytesIO
from ._compat import text_type
from ._compat import to_native
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .urls import url_decode_stream
from .wsgi import get_content_length
from .wsgi import get_input_stream
from .wsgi import make_line_iter

# There are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
    from tempfile import SpooledTemporaryFile
except ImportError:
    from tempfile import TemporaryFile

    SpooledTemporaryFile = None


#: an iterator that yields empty strings
_empty_string_iter = repeat("")

#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$")

#: HTTP transfer encodings we support for multipart messages, limited to
#: those that are also available as Python codecs.
_supported_multipart_encodings = frozenset(["base64", "quoted-printable"])


def default_stream_factory(
    total_content_length, filename, content_type, content_length=None
):
    """The stream factory that is used by default."""
    max_size = 1024 * 500
    if SpooledTemporaryFile is not None:
        return SpooledTemporaryFile(max_size=max_size, mode="wb+")
    if total_content_length is None or total_content_length > max_size:
        return TemporaryFile("wb+")
    return BytesIO()


def parse_form_data(
    environ,
    stream_factory=None,
    charset="utf-8",
    errors="replace",
    max_form_memory_size=None,
    max_content_length=None,
    cls=None,
    silent=True,
):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``. You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects. If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                           is longer than this value an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                           or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    return FormDataParser(
        stream_factory,
        charset,
        errors,
        max_form_memory_size,
        max_content_length,
        cls,
        silent,
    ).parse_from_environ(environ)


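# A minimal WSGI sketch of the common case; the field names and the save
# path are illustrative.
def _example_parse_form_data(environ, start_response):
    stream, form, files = parse_form_data(environ)
    name = form.get("name", "")
    upload = files.get("attachment")  # a FileStorage instance or None
    if upload is not None:
        upload.save("/tmp/upload.bin")
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [("Hello %s" % name).encode("utf-8")]

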
def exhaust_stream(f):
    """Helper decorator for methods that exhaust the stream on return."""

    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            exhaust = getattr(stream, "exhaust", None)
            if exhaust is not None:
                exhaust()
            else:
                while 1:
                    chunk = stream.read(1024 * 64)
                    if not chunk:
                        break

    return update_wrapper(wrapper, f)


class FormDataParser(object):
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                           is longer than this value an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                           or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(
        self,
        stream_factory=None,
        charset="utf-8",
        errors="replace",
        max_form_memory_size=None,
        max_content_length=None,
        cls=None,
        silent=True,
    ):
        if stream_factory is None:
            stream_factory = default_stream_factory
        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length
        if cls is None:
            cls = MultiDict
        self.cls = cls
        self.silent = silent

    def get_parse_func(self, mimetype, options):
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ):
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(self, stream, mimetype, content_length, options=None):
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        if (
            self.max_content_length is not None
            and content_length is not None
            and content_length > self.max_content_length
        ):
            raise exceptions.RequestEntityTooLarge()
        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)
        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                if not self.silent:
                    raise

        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(self, stream, mimetype, content_length, options):
        parser = MultiPartParser(
            self.stream_factory,
            self.charset,
            self.errors,
            max_form_memory_size=self.max_form_memory_size,
            cls=self.cls,
        )
        boundary = options.get("boundary")
        if boundary is None:
            raise ValueError("Missing boundary")
        if isinstance(boundary, text_type):
            boundary = boundary.encode("ascii")
        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(self, stream, mimetype, content_length, options):
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            raise exceptions.RequestEntityTooLarge()
        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions = {
        "multipart/form-data": _parse_multipart,
        "application/x-www-form-urlencoded": _parse_urlencoded,
        "application/x-url-encoded": _parse_urlencoded,
    }


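# Because parse functions are looked up through ``get_parse_func`` and the
# ``parse_functions`` mapping, the parser can be extended for additional
# mimetypes by subclassing. The JSON handling below is an illustrative
# sketch, not part of Werkzeug.
def _example_extended_parser():
    import json

    class JSONFormDataParser(FormDataParser):
        def _parse_json(self, stream, mimetype, content_length, options):
            data = json.loads(stream.read().decode(self.charset))
            return stream, self.cls(data), self.cls()

        parse_functions = dict(FormDataParser.parse_functions)
        parse_functions["application/json"] = _parse_json

    return JSONFormDataParser()

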
def is_valid_multipart_boundary(boundary):
    """Checks if the string given is a valid multipart boundary."""
    return _multipart_boundary_re.match(boundary) is not None


def _line_parse(line):
    """Removes line ending characters and returns a tuple (`stripped_line`,
    `is_terminated`).
    """
    if line[-2:] in ["\r\n", b"\r\n"]:
        return line[:-2], True
    elif line[-1:] in ["\r", "\n", b"\r", b"\n"]:
        return line[:-1], True
    return line, False


def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol). The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    """
    result = []
    for line in iterable:
        line = to_native(line)
        line, line_terminated = _line_parse(line)
        if not line_terminated:
            raise ValueError("unexpected end of line in multipart header")
        if not line:
            break
        elif line[0] in " \t" and result:
            key, value = result[-1]
            result[-1] = (key, value + "\n " + line[1:])
        else:
            parts = line.split(":", 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))

    # we link the list to the headers, no need to create a copy, the
    # list was not shared anyway.
    return Headers(result)


_begin_form = "begin_form"
|
||||
_begin_file = "begin_file"
|
||||
_cont = "cont"
|
||||
_end = "end"
|
||||
|
||||
|
||||
class MultiPartParser(object):
|
||||
def __init__(
|
||||
self,
|
||||
stream_factory=None,
|
||||
charset="utf-8",
|
||||
errors="replace",
|
||||
max_form_memory_size=None,
|
||||
cls=None,
|
||||
buffer_size=64 * 1024,
|
||||
):
|
||||
self.charset = charset
|
||||
self.errors = errors
|
||||
self.max_form_memory_size = max_form_memory_size
|
||||
self.stream_factory = (
|
||||
default_stream_factory if stream_factory is None else stream_factory
|
||||
)
|
||||
self.cls = MultiDict if cls is None else cls
|
||||
|
||||
# make sure the buffer size is divisible by four so that we can base64
|
||||
# decode chunk by chunk
|
||||
assert buffer_size % 4 == 0, "buffer size has to be divisible by 4"
|
||||
# also the buffer size has to be at least 1024 bytes long or long headers
|
||||
# will freak out the system
|
||||
assert buffer_size >= 1024, "buffer size has to be at least 1KB"
|
||||
|
||||
self.buffer_size = buffer_size
|
||||
|
||||
def _fix_ie_filename(self, filename):
|
||||
"""Internet Explorer 6 transmits the full file name if a file is
|
||||
uploaded. This function strips the full path if it thinks the
|
||||
filename is Windows-like absolute.
|
||||
"""
|
||||
if filename[1:3] == ":\\" or filename[:2] == "\\\\":
|
||||
return filename.split("\\")[-1]
|
||||
return filename
|
||||
|
||||
def _find_terminator(self, iterator):
|
||||
"""The terminator might have some additional newlines before it.
|
||||
There is at least one application that sends additional newlines
|
||||
before headers (the python setuptools package).
|
||||
"""
|
||||
for line in iterator:
|
||||
if not line:
|
||||
break
|
||||
line = line.strip()
|
||||
if line:
|
||||
return line
|
||||
return b""
|
||||
|
||||
def fail(self, message):
|
||||
raise ValueError(message)
|
||||
|
||||
def get_part_encoding(self, headers):
|
||||
transfer_encoding = headers.get("content-transfer-encoding")
|
||||
if (
|
||||
transfer_encoding is not None
|
||||
and transfer_encoding in _supported_multipart_encodings
|
||||
):
|
||||
return transfer_encoding
|
||||
|
||||
def get_part_charset(self, headers):
|
||||
# Figure out input charset for current part
|
||||
content_type = headers.get("content-type")
|
||||
if content_type:
|
||||
mimetype, ct_params = parse_options_header(content_type)
|
||||
return ct_params.get("charset", self.charset)
|
||||
return self.charset
|
||||
|
||||
def start_file_streaming(self, filename, headers, total_content_length):
|
||||
if isinstance(filename, bytes):
|
||||
filename = filename.decode(self.charset, self.errors)
|
||||
filename = self._fix_ie_filename(filename)
|
||||
content_type = headers.get("content-type")
|
||||
try:
|
||||
content_length = int(headers["content-length"])
|
||||
except (KeyError, ValueError):
|
||||
content_length = 0
|
||||
container = self.stream_factory(
|
||||
total_content_length=total_content_length,
|
||||
filename=filename,
|
||||
content_type=content_type,
|
||||
content_length=content_length,
|
||||
)
|
||||
return filename, container
|
||||
|
||||
def in_memory_threshold_reached(self, bytes):
|
||||
raise exceptions.RequestEntityTooLarge()
|
||||
|
||||
def validate_boundary(self, boundary):
|
||||
if not boundary:
|
||||
self.fail("Missing boundary")
|
||||
if not is_valid_multipart_boundary(boundary):
|
||||
self.fail("Invalid boundary: %s" % boundary)
|
||||
if len(boundary) > self.buffer_size: # pragma: no cover
|
||||
# this should never happen because we check for a minimum size
|
||||
# of 1024 and boundaries may not be longer than 200. The only
|
||||
# situation when this happens is for non debug builds where
|
||||
# the assert is skipped.
|
||||
self.fail("Boundary longer than buffer size")
|
||||
|
||||
def parse_lines(self, file, boundary, content_length, cap_at_buffer=True):
|
||||
"""Generate parts of
|
||||
``('begin_form', (headers, name))``
|
||||
``('begin_file', (headers, name, filename))``
|
||||
``('cont', bytestring)``
|
||||
``('end', None)``
|
||||
|
||||
Always obeys the grammar
|
||||
parts = ( begin_form cont* end |
|
||||
begin_file cont* end )*
|
||||
"""
|
||||
next_part = b"--" + boundary
|
||||
last_part = next_part + b"--"
|
||||
|
||||
iterator = chain(
|
||||
make_line_iter(
|
||||
file,
|
||||
limit=content_length,
|
||||
buffer_size=self.buffer_size,
|
||||
cap_at_buffer=cap_at_buffer,
|
||||
),
|
||||
_empty_string_iter,
|
||||
)
|
||||
|
||||
terminator = self._find_terminator(iterator)
|
||||
|
||||
if terminator == last_part:
|
||||
return
|
||||
elif terminator != next_part:
|
||||
self.fail("Expected boundary at start of multipart data")
|
||||
|
||||
while terminator != last_part:
|
||||
headers = parse_multipart_headers(iterator)
|
||||
|
||||
disposition = headers.get("content-disposition")
|
||||
if disposition is None:
|
||||
self.fail("Missing Content-Disposition header")
|
||||
disposition, extra = parse_options_header(disposition)
|
||||
transfer_encoding = self.get_part_encoding(headers)
|
||||
name = extra.get("name")
|
||||
filename = extra.get("filename")
|
||||
|
||||
# if no content type is given we stream into memory. A list is
|
||||
# used as a temporary container.
|
||||
if filename is None:
|
||||
yield _begin_form, (headers, name)
|
||||
|
||||
# otherwise we parse the rest of the headers and ask the stream
|
||||
# factory for something we can write in.
|
||||
else:
|
||||
yield _begin_file, (headers, name, filename)
|
||||
|
||||
buf = b""
|
||||
for line in iterator:
|
||||
if not line:
|
||||
self.fail("unexpected end of stream")
|
||||
|
||||
if line[:2] == b"--":
|
||||
terminator = line.rstrip()
|
||||
if terminator in (next_part, last_part):
|
||||
break
|
||||
|
||||
if transfer_encoding is not None:
|
||||
if transfer_encoding == "base64":
|
||||
transfer_encoding = "base64_codec"
|
||||
try:
|
||||
line = codecs.decode(line, transfer_encoding)
|
||||
except Exception:
|
||||
self.fail("could not decode transfer encoded chunk")
|
||||
|
||||
# we have something in the buffer from the last iteration.
|
||||
# this is usually a newline delimiter.
|
||||
if buf:
|
||||
yield _cont, buf
|
||||
buf = b""
|
||||
|
||||
# If the line ends with windows CRLF we write everything except
|
||||
# the last two bytes. In all other cases however we write
|
||||
# everything except the last byte. If it was a newline, that's
|
||||
# fine, otherwise it does not matter because we will write it
|
||||
# the next iteration. this ensures we do not write the
|
||||
# final newline into the stream. That way we do not have to
|
||||
# truncate the stream. However we do have to make sure that
|
||||
# if something else than a newline is in there we write it
|
||||
# out.
|
||||
if line[-2:] == b"\r\n":
|
||||
buf = b"\r\n"
|
||||
cutoff = -2
|
||||
else:
|
||||
buf = line[-1:]
|
||||
cutoff = -1
|
||||
yield _cont, line[:cutoff]
|
||||
|
||||
else: # pragma: no cover
|
||||
raise ValueError("unexpected end of part")
|
||||
|
||||
# if we have a leftover in the buffer that is not a newline
|
||||
# character we have to flush it, otherwise we will chop of
|
||||
# certain values.
|
||||
if buf not in (b"", b"\r", b"\n", b"\r\n"):
|
||||
yield _cont, buf
|
||||
|
||||
yield _end, None
|
||||
|
||||
    def parse_parts(self, file, boundary, content_length):
        """Generate ``('file', (name, val))`` and
        ``('form', (name, val))`` parts.
        """
        in_memory = 0

        for ellt, ell in self.parse_lines(file, boundary, content_length):
            if ellt == _begin_file:
                headers, name, filename = ell
                is_file = True
                guard_memory = False
                filename, container = self.start_file_streaming(
                    filename, headers, content_length
                )
                _write = container.write

            elif ellt == _begin_form:
                headers, name = ell
                is_file = False
                container = []
                _write = container.append
                guard_memory = self.max_form_memory_size is not None

            elif ellt == _cont:
                _write(ell)
                # if we write into memory and there is a memory size limit we
                # count the number of bytes in memory and raise an exception if
                # there is too much data in memory.
                if guard_memory:
                    in_memory += len(ell)
                    if in_memory > self.max_form_memory_size:
                        self.in_memory_threshold_reached(in_memory)

            elif ellt == _end:
                if is_file:
                    container.seek(0)
                    yield (
                        "file",
                        (name, FileStorage(container, filename, name, headers=headers)),
                    )
                else:
                    part_charset = self.get_part_charset(headers)
                    yield (
                        "form",
                        (name, b"".join(container).decode(part_charset, self.errors)),
                    )

    def parse(self, file, boundary, content_length):
        formstream, filestream = tee(
            self.parse_parts(file, boundary, content_length), 2
        )
        form = (p[1] for p in formstream if p[0] == "form")
        files = (p[1] for p in filestream if p[0] == "file")
        return self.cls(form), self.cls(files)


from . import exceptions
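
# The ``parse_lines`` grammar above can also be consumed directly; this
# sketch prints the event stream for a captured multipart body. The
# arguments are illustrative, and ``boundary`` must be a bytestring.
def _example_multipart_events(body_file, boundary, content_length):
    parser = MultiPartParser()
    for event, payload in parser.parse_lines(body_file, boundary, content_length):
        if event in ("begin_form", "begin_file"):
            print(event, payload[1])  # the field name
        elif event == "end":
            print("end of part")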
1303
python/werkzeug/http.py
Normal file
File diff suppressed because it is too large
421
python/werkzeug/local.py
Normal file
@@ -0,0 +1,421 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.local
    ~~~~~~~~~~~~~~

    This module implements context-local objects.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import copy
from functools import update_wrapper

from ._compat import implements_bool
from ._compat import PY2
from .wsgi import ClosingIterator

# Since each thread has its own greenlet, we can use the greenlet as an
# identifier for the context. If greenlets are not available we fall
# back to the current thread ident.
try:
    from greenlet import getcurrent as get_ident
except ImportError:
    try:
        from thread import get_ident
    except ImportError:
        from _thread import get_ident


def release_local(local):
    """Releases the contents of the local for the current context.
    This makes it possible to use locals without a manager.

    Example::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    With this function one can release :class:`Local` objects as well
    as :class:`LocalStack` objects. However it is not possible to
    release data held by proxies that way, one always has to retain
    a reference to the underlying local object in order to be able
    to release it.

    .. versionadded:: 0.6.1
    """
    local.__release_local__()


class Local(object):
    __slots__ = ("__storage__", "__ident_func__")

    def __init__(self):
        object.__setattr__(self, "__storage__", {})
        object.__setattr__(self, "__ident_func__", get_ident)

    def __iter__(self):
        return iter(self.__storage__.items())

    def __call__(self, proxy):
        """Create a proxy for a name."""
        return LocalProxy(self, proxy)

    def __release_local__(self):
        self.__storage__.pop(self.__ident_func__(), None)

    def __getattr__(self, name):
        try:
            return self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        ident = self.__ident_func__()
        storage = self.__storage__
        try:
            storage[ident][name] = value
        except KeyError:
            storage[ident] = {name: value}

    def __delattr__(self, name):
        try:
            del self.__storage__[self.__ident_func__()][name]
        except KeyError:
            raise AttributeError(name)


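# Each thread (or greenlet) sees its own attributes on a ``Local``; a
# short illustrative sketch:
def _example_local_isolation():
    import threading

    data = Local()
    data.user = "main"

    def worker():
        data.user = "worker"  # does not affect the main thread's value

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    assert data.user == "main"

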
class LocalStack(object):
    """This class works similar to a :class:`Local` but keeps a stack
    of objects instead. This is best explained with an example::

        >>> ls = LocalStack()
        >>> ls.push(42)
        >>> ls.top
        42
        >>> ls.push(23)
        >>> ls.top
        23
        >>> ls.pop()
        23
        >>> ls.top
        42

    They can be force released by using a :class:`LocalManager` or with
    the :func:`release_local` function but the correct way is to pop the
    item from the stack after using. When the stack is empty it will
    no longer be bound to the current context (and as such released).

    By calling the stack without arguments it returns a proxy that resolves to
    the topmost item on the stack.

    .. versionadded:: 0.6.1
    """

    def __init__(self):
        self._local = Local()

    def __release_local__(self):
        self._local.__release_local__()

    def _get__ident_func__(self):
        return self._local.__ident_func__

    def _set__ident_func__(self, value):
        object.__setattr__(self._local, "__ident_func__", value)

    __ident_func__ = property(_get__ident_func__, _set__ident_func__)
    del _get__ident_func__, _set__ident_func__

    def __call__(self):
        def _lookup():
            rv = self.top
            if rv is None:
                raise RuntimeError("object unbound")
            return rv

        return LocalProxy(_lookup)

    def push(self, obj):
        """Pushes a new item to the stack"""
        rv = getattr(self._local, "stack", None)
        if rv is None:
            self._local.stack = rv = []
        rv.append(obj)
        return rv

    def pop(self):
        """Removes the topmost item from the stack, will return the
        old value or `None` if the stack was already empty.
        """
        stack = getattr(self._local, "stack", None)
        if stack is None:
            return None
        elif len(stack) == 1:
            release_local(self._local)
            return stack[-1]
        else:
            return stack.pop()

    @property
    def top(self):
        """The topmost item on the stack. If the stack is empty,
        `None` is returned.
        """
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None


class LocalManager(object):
    """Local objects cannot manage themselves. For that you need a local
    manager. You can pass a local manager multiple locals or add them later
    by appending them to `manager.locals`. Every time the manager cleans up,
    it will clean up all the data left in the locals for this context.

    The `ident_func` parameter can be added to override the default ident
    function for the wrapped locals.

    .. versionchanged:: 0.6.1
       Instead of a manager the :func:`release_local` function can be used
       as well.

    .. versionchanged:: 0.7
       `ident_func` was added.
    """

    def __init__(self, locals=None, ident_func=None):
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            self.ident_func = ident_func
            for local in self.locals:
                object.__setattr__(local, "__ident_func__", ident_func)
        else:
            self.ident_func = get_ident

    def get_ident(self):
        """Return the context identifier the local objects use internally for
        this context. You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.

        .. versionchanged:: 0.7
           You can pass a different ident function to the local manager that
           will then be propagated to all the locals passed to the
           constructor.
        """
        return self.ident_func()

    def cleanup(self):
        """Manually clean up the data in the locals for this context. Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)

    def make_middleware(self, app):
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """

        def application(environ, start_response):
            return ClosingIterator(app(environ, start_response), self.cleanup)

        return application

    def middleware(self, func):
        """Like `make_middleware` but for decorating functions.

        Example usage::

            @manager.middleware
            def application(environ, start_response):
                ...

        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return update_wrapper(self.make_middleware(func), func)

    def __repr__(self):
        return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals))


@implements_bool
class LocalProxy(object):
    """Acts as a proxy for a werkzeug local. Forwards all operations to
    a proxied object. The only operations not supported for forwarding
    are right handed operands and any kind of assignment.

    Example usage::

        from werkzeug.local import Local
        l = Local()

        # these are proxies
        request = l('request')
        user = l('user')


        from werkzeug.local import LocalStack
        _response_local = LocalStack()

        # this is a proxy
        response = _response_local()

    Whenever something is bound to l.user / l.request the proxy objects
    will forward all operations. If no object is bound a :exc:`RuntimeError`
    will be raised.

    To create proxies to :class:`Local` or :class:`LocalStack` objects,
    call the object as shown above. If you want to have a proxy to an
    object looked up by a function, you can (as of Werkzeug 0.6.1) pass
    a function to the :class:`LocalProxy` constructor::

        session = LocalProxy(lambda: get_current_request().session)

    .. versionchanged:: 0.6.1
       The class can be instantiated with a callable as well now.
    """

    __slots__ = ("__local", "__dict__", "__name__", "__wrapped__")

    def __init__(self, local, name=None):
        object.__setattr__(self, "_LocalProxy__local", local)
        object.__setattr__(self, "__name__", name)
        if callable(local) and not hasattr(local, "__release_local__"):
            # "local" is a callable that is not an instance of Local or
            # LocalManager: mark it as a wrapped function.
            object.__setattr__(self, "__wrapped__", local)

    def _get_current_object(self):
        """Return the current object. This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        if not hasattr(self.__local, "__release_local__"):
            return self.__local()
        try:
            return getattr(self.__local, self.__name__)
        except AttributeError:
            raise RuntimeError("no object bound to %s" % self.__name__)

    @property
    def __dict__(self):
        try:
            return self._get_current_object().__dict__
        except RuntimeError:
            raise AttributeError("__dict__")

    def __repr__(self):
        try:
            obj = self._get_current_object()
        except RuntimeError:
            return "<%s unbound>" % self.__class__.__name__
        return repr(obj)

    def __bool__(self):
        try:
            return bool(self._get_current_object())
        except RuntimeError:
            return False

    def __unicode__(self):
        try:
            return unicode(self._get_current_object())  # noqa
        except RuntimeError:
            return repr(self)

    def __dir__(self):
        try:
            return dir(self._get_current_object())
        except RuntimeError:
            return []

    def __getattr__(self, name):
        if name == "__members__":
            return dir(self._get_current_object())
        return getattr(self._get_current_object(), name)

    def __setitem__(self, key, value):
        self._get_current_object()[key] = value

    def __delitem__(self, key):
        del self._get_current_object()[key]

    if PY2:
        __getslice__ = lambda x, i, j: x._get_current_object()[i:j]

        def __setslice__(self, i, j, seq):
            self._get_current_object()[i:j] = seq

        def __delslice__(self, i, j):
            del self._get_current_object()[i:j]

    __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v)
    __delattr__ = lambda x, n: delattr(x._get_current_object(), n)
    __str__ = lambda x: str(x._get_current_object())
    __lt__ = lambda x, o: x._get_current_object() < o
    __le__ = lambda x, o: x._get_current_object() <= o
    __eq__ = lambda x, o: x._get_current_object() == o
    __ne__ = lambda x, o: x._get_current_object() != o
    __gt__ = lambda x, o: x._get_current_object() > o
    __ge__ = lambda x, o: x._get_current_object() >= o
    __cmp__ = lambda x, o: cmp(x._get_current_object(), o)  # noqa
    __hash__ = lambda x: hash(x._get_current_object())
    __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw)
    __len__ = lambda x: len(x._get_current_object())
    __getitem__ = lambda x, i: x._get_current_object()[i]
    __iter__ = lambda x: iter(x._get_current_object())
    __contains__ = lambda x, i: i in x._get_current_object()
    __add__ = lambda x, o: x._get_current_object() + o
    __sub__ = lambda x, o: x._get_current_object() - o
    __mul__ = lambda x, o: x._get_current_object() * o
    __floordiv__ = lambda x, o: x._get_current_object() // o
    __mod__ = lambda x, o: x._get_current_object() % o
    __divmod__ = lambda x, o: x._get_current_object().__divmod__(o)
    __pow__ = lambda x, o: x._get_current_object() ** o
    __lshift__ = lambda x, o: x._get_current_object() << o
    __rshift__ = lambda x, o: x._get_current_object() >> o
    __and__ = lambda x, o: x._get_current_object() & o
    __xor__ = lambda x, o: x._get_current_object() ^ o
    __or__ = lambda x, o: x._get_current_object() | o
    __div__ = lambda x, o: x._get_current_object().__div__(o)
    __truediv__ = lambda x, o: x._get_current_object().__truediv__(o)
    __neg__ = lambda x: -(x._get_current_object())
    __pos__ = lambda x: +(x._get_current_object())
    __abs__ = lambda x: abs(x._get_current_object())
    __invert__ = lambda x: ~(x._get_current_object())
    __complex__ = lambda x: complex(x._get_current_object())
    __int__ = lambda x: int(x._get_current_object())
    __long__ = lambda x: long(x._get_current_object())  # noqa
    __float__ = lambda x: float(x._get_current_object())
    __oct__ = lambda x: oct(x._get_current_object())
    __hex__ = lambda x: hex(x._get_current_object())
    __index__ = lambda x: x._get_current_object().__index__()
    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
    __enter__ = lambda x: x._get_current_object().__enter__()
    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
    __radd__ = lambda x, o: o + x._get_current_object()
    __rsub__ = lambda x, o: o - x._get_current_object()
    __rmul__ = lambda x, o: o * x._get_current_object()
    __rdiv__ = lambda x, o: o / x._get_current_object()
    if PY2:
        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
    else:
        __rtruediv__ = __rdiv__
    __rfloordiv__ = lambda x, o: o // x._get_current_object()
    __rmod__ = lambda x, o: o % x._get_current_object()
    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
    __copy__ = lambda x: copy.copy(x._get_current_object())
    __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
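
# Calling a ``LocalStack`` returns a proxy to whatever is on top of the
# stack at access time; a brief illustrative sketch:
def _example_stack_proxy():
    stack = LocalStack()
    current = stack()  # a proxy that resolves on each use
    stack.push({"name": "example"})
    assert current["name"] == "example"
    assert current._get_current_object() is stack.top
    stack.pop()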
25
python/werkzeug/middleware/__init__.py
Normal file
@@ -0,0 +1,25 @@
"""
Middleware
==========

A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.

.. toctree::
    :maxdepth: 1

    proxy_fix
    shared_data
    dispatcher
    http_proxy
    lint
    profiler

The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
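
# The smallest possible middleware, for orientation (an illustrative
# sketch, not a shipped middleware): it forwards the request unchanged,
# but could inspect or rewrite ``environ``, the status, the headers, or
# the body chunks on the way through.
class _ExampleNoOpMiddleware(object):
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # observe or modify environ here, before the wrapped app runs
        return self.app(environ, start_response)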
66
python/werkzeug/middleware/dispatcher.py
Normal file
@@ -0,0 +1,66 @@
"""
Application Dispatcher
======================

This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.

A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".

This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::

    app = DispatcherMiddleware(serve_frontend, {
        '/api': api_app,
        '/admin': admin_app,
    })

In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.

.. autoclass:: DispatcherMiddleware

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""


class DispatcherMiddleware(object):
    """Combine multiple applications as a single WSGI application.
    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(self, app, mounts=None):
        self.app = app
        self.mounts = mounts or {}

    def __call__(self, environ, start_response):
        script = environ.get("PATH_INFO", "")
        path_info = ""

        while "/" in script:
            if script in self.mounts:
                app = self.mounts[script]
                break

            script, last_item = script.rsplit("/", 1)
            path_info = "/%s%s" % (last_item, path_info)
        else:
            app = self.mounts.get(script, self.app)

        original_script_name = environ.get("SCRIPT_NAME", "")
        environ["SCRIPT_NAME"] = original_script_name + script
        environ["PATH_INFO"] = path_info
        return app(environ, start_response)
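
# A sketch of the SCRIPT_NAME / PATH_INFO split performed above: with the
# mounts below, a request for /api/users reaches ``api_app`` with
# SCRIPT_NAME="/api" and PATH_INFO="/users". The app names are illustrative.
def _example_dispatch():
    def api_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [environ["PATH_INFO"].encode("utf-8")]

    def default_app(environ, start_response):
        start_response("200 OK", [("Content-Type", "text/plain")])
        return [b"frontend"]

    return DispatcherMiddleware(default_app, {"/api": api_app})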
219
python/werkzeug/middleware/http_proxy.py
Normal file
@@ -0,0 +1,219 @@
"""
Basic HTTP Proxy
================

.. autoclass:: ProxyMiddleware

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import socket

from ..datastructures import EnvironHeaders
from ..http import is_hop_by_hop_header
from ..urls import url_parse
from ..urls import url_quote
from ..wsgi import get_input_stream

try:
    from http import client
except ImportError:
    import httplib as client


class ProxyMiddleware(object):
    """Proxy requests under a path to an external server, routing other
    requests to the app.

    This middleware can only proxy HTTP requests, as that is the only
    protocol handled by the WSGI server. Other protocols, such as
    websocket requests, cannot be proxied at this layer. This should
    only be used for development; in production, a real proxy server
    should be used instead.

    The middleware takes a dict that maps a path prefix to a dict
    describing the host to be proxied to::

        app = ProxyMiddleware(app, {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
            }
        })

    Each host has the following options:

    ``target``:
        The target URL to dispatch to. This is required.
    ``remove_prefix``:
        Whether to remove the prefix from the URL before dispatching it
        to the target. The default is ``False``.
    ``host``:
        ``"<auto>"`` (default):
            The host header is automatically rewritten to the URL of the
            target.
        ``None``:
            The host header is unmodified from the client request.
        Any other value:
            The host header is overwritten with the value.
    ``headers``:
        A dictionary of headers to be sent with the request to the
        target. The default is ``{}``.
    ``ssl_context``:
        A :class:`ssl.SSLContext` defining how to verify requests if the
        target is HTTPS. The default is ``None``.

    In the example above, everything under ``"/static/"`` is proxied to
    the server on port 5001. The host header is rewritten to the target,
    and the ``"/static/"`` prefix is removed from the URLs.

    :param app: The WSGI application to wrap.
    :param targets: Proxy target configurations. See description above.
    :param chunk_size: Size of chunks to read from input stream and
        write to target.
    :param timeout: Seconds before an operation to a target fails.

    .. versionadded:: 0.14
    """

    def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
        def _set_defaults(opts):
            opts.setdefault("remove_prefix", False)
            opts.setdefault("host", "<auto>")
            opts.setdefault("headers", {})
            opts.setdefault("ssl_context", None)
            return opts

        self.app = app
        self.targets = dict(
            ("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items()
        )
        self.chunk_size = chunk_size
        self.timeout = timeout

    def proxy_to(self, opts, path, prefix):
        target = url_parse(opts["target"])

        def application(environ, start_response):
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))

            if opts["host"] == "<auto>":
                headers.append(("Host", target.ascii_host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))

            headers.extend(opts["headers"].items())
            remote_path = path

            if opts["remove_prefix"]:
                remote_path = "%s/%s" % (
                    target.path.rstrip("/"),
                    remote_path[len(prefix) :].lstrip("/"),
                )

            content_length = environ.get("CONTENT_LENGTH")
            chunked = False

            if content_length not in ("", None):
                headers.append(("Content-Length", content_length))
            elif content_length is not None:
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True

            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        target.ascii_host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        target.ascii_host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got '{}'.".format(
                            target.scheme
                        )
                    )

                con.connect()
                remote_url = url_quote(remote_path)
                querystring = environ["QUERY_STRING"]

                if querystring:
                    remote_url = remote_url + "?" + querystring

                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)

                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"

                    con.putheader(k, v)

                con.endheaders()
                stream = get_input_stream(environ)

                while 1:
                    data = stream.read(self.chunk_size)

                    if not data:
                        break

                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)

                resp = con.getresponse()
            except socket.error:
                from ..exceptions import BadGateway

                return BadGateway()(environ, start_response)

            start_response(
                "%d %s" % (resp.status, resp.reason),
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )

            def read():
                while 1:
                    try:
                        data = resp.read(self.chunk_size)
                    except socket.error:
                        break

                    if not data:
                        break

                    yield data

            return read()

        return application

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"]
        app = self.app

        for prefix, opts in self.targets.items():
            if path.startswith(prefix):
                app = self.proxy_to(opts, path, prefix)
                break

        return app(environ, start_response)
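
# A development-only sketch: proxy /static/ to another local server and
# serve everything else from ``app``. The host and port are illustrative.
def _example_proxy_setup(app):
    return ProxyMiddleware(
        app,
        {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
                "remove_prefix": True,
            }
        },
    )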
408
python/werkzeug/middleware/lint.py
Normal file
@@ -0,0 +1,408 @@
|
||||
"""
|
||||
WSGI Protocol Linter
|
||||
====================
|
||||
|
||||
This module provides a middleware that performs sanity checks on the
|
||||
behavior of the WSGI server and application. It checks that the
|
||||
:pep:`3333` WSGI spec is properly implemented. It also warns on some
|
||||
common HTTP errors such as non-empty responses for 304 status codes.
|
||||
|
||||
.. autoclass:: LintMiddleware
|
||||
|
||||
:copyright: 2007 Pallets
|
||||
:license: BSD-3-Clause
|
||||
"""
|
||||
from warnings import warn
|
||||
|
||||
from .._compat import implements_iterator
|
||||
from .._compat import PY2
|
||||
from .._compat import string_types
|
||||
from ..datastructures import Headers
|
||||
from ..http import is_entity_header
|
||||
from ..wsgi import FileWrapper
|
||||
|
||||
try:
|
||||
from urllib.parse import urlparse
|
||||
except ImportError:
|
||||
from urlparse import urlparse
|
||||
|
||||
|
||||
class WSGIWarning(Warning):
|
||||
"""Warning class for WSGI warnings."""
|
||||
|
||||
|
||||
class HTTPWarning(Warning):
|
||||
"""Warning class for HTTP warnings."""
|
||||
|
||||
|
||||
def check_string(context, obj, stacklevel=3):
|
||||
if type(obj) is not str:
|
||||
warn(
|
||||
"'%s' requires strings, got '%s'" % (context, type(obj).__name__),
|
||||
WSGIWarning,
|
||||
)
|
||||
|
||||
|
||||
class InputStream(object):
    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        if len(args) == 0:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )
        return self._stream.read(*args)

    def readline(self, *args):
        if len(args) == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) == 1:
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())

    def close(self):
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._stream.close()


class ErrorStream(object):
    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string("wsgi.error.write()", s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        for line in seq:
            self.write(line)

    def close(self):
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._stream.close()


class GuardedWrite(object):
    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string("write()", s)
        self._write(s)
        self._chunks.append(len(s))


@implements_iterator
class GuardedIterator(object):
    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        if PY2:
            self._next = iter(iterator).next
        else:
            self._next = iter(iterator).__next__
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def __next__(self):
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)

        rv = self._next()

        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )

        check_string("application iterator items", rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True

        if hasattr(self._iterator, "close"):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)

            if status_code == 304:
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            "Entity header %r found in 304 response." % key, HTTPWarning
                        )
                if bytes_sent:
                    warn("304 responses must not have a body.", HTTPWarning)
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    warn(
                        "%r responses must have an empty content length." % status_code,
                        HTTPWarning,
                    )
                if bytes_sent:
                    warn(
                        "%r responses must not have a body." % status_code, HTTPWarning
                    )
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the client do not"
                    " match.",
                    WSGIWarning,
                )

    def __del__(self):
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.", WSGIWarning
                )
            except Exception:
                pass


class LintMiddleware(object):
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:

    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators

    Error information is emitted using the :mod:`warnings` module.

    :param app: The WSGI application to wrap.

    .. code-block:: python

        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        if type(environ) is not dict:
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    "Required environment key %r not found" % key,
                    WSGIWarning,
                    stacklevel=3,
                )
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)

        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")

        if script_name and script_name[0] != "/":
            warn(
                "'SCRIPT_NAME' does not start with a slash: %r" % script_name,
                WSGIWarning,
                stacklevel=3,
            )

        if path_info and path_info[0] != "/":
            warn(
                "'PATH_INFO' does not start with a slash: %r" % path_info,
                WSGIWarning,
                stacklevel=3,
            )

    def check_start_response(self, status, headers, exc_info):
        check_string("status", status)
        status_code = status.split(None, 1)[0]

        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning("Status code must be three digits"), stacklevel=3)

        if len(status) < 4 or status[3] != " ":
            warn(
                WSGIWarning(
                    "Invalid value for status %r. Valid "
                    "status strings are three digits, a space "
                    "and a status explanation"
                ),
                stacklevel=3,
            )

        status_code = int(status_code)

        if status_code < 100:
            warn(WSGIWarning("status code < 100 detected"), stacklevel=3)

        if type(headers) is not list:
            warn(WSGIWarning("header list is not a list"), stacklevel=3)

        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning("Headers must be a list of 2-item tuples"), stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning("header items must be strings"), stacklevel=3)
            if name.lower() == "status":
                warn(
                    WSGIWarning(
                        "The status header is not supported due to "
                        "conflicts with the CGI spec."
                    ),
                    stacklevel=3,
                )

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers):
        etag = headers.get("etag")

        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        HTTPWarning("weak etag indicator should be upper case."),
                        stacklevel=4,
                    )

                etag = etag[2:]

            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)

        location = headers.get("location")

        if location is not None:
            if not urlparse(location).netloc:
                warn(
                    HTTPWarning("absolute URLs required for location header"),
                    stacklevel=4,
                )

    def check_iterator(self, app_iter):
        if isinstance(app_iter, string_types):
            warn(
                "The application returned a string. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )

    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)

        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )

        environ, start_response = args

        self.check_environ(environ)
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])

        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper

        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                warn(
                    "Invalid number of arguments: %s, expected 2 or 3." % len(args),
                    WSGIWarning,
                    stacklevel=2,
                )

            if kwargs:
                warn("'start_response' does not take keyword arguments.", WSGIWarning)

            status, headers = args[:2]

            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None

            headers_set[:] = self.check_start_response(status, headers, exc_info)
            return GuardedWrite(start_response(status, headers, exc_info), chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
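A minimal sketch of the linter catching an HTTP mistake; demo_app and the no-op start_response are illustrative, not part of this commit:

    import warnings

    from werkzeug.middleware.lint import LintMiddleware
    from werkzeug.test import create_environ

    def demo_app(environ, start_response):
        # A body and an entity header on a 304 are both HTTP errors.
        start_response("304 Not Modified", [("Content-Type", "text/plain")])
        return ["not empty"]

    app = LintMiddleware(demo_app)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        app_iter = app(create_environ("/"), lambda *a: lambda s: None)
        list(app_iter)
        app_iter.close()  # runs the 304 checks in GuardedIterator.close

    for w in caught:
        print(w.category.__name__, w.message)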
132
python/werkzeug/middleware/profiler.py
Normal file
@@ -0,0 +1,132 @@
"""
Application Profiler
====================

This module provides a middleware that profiles each request with the
:mod:`cProfile` module. This can help identify bottlenecks in your code
that may be slowing down your application.

.. autoclass:: ProfilerMiddleware

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from __future__ import print_function

import os.path
import sys
import time
from pstats import Stats

try:
    from cProfile import Profile
except ImportError:
    from profile import Profile


class ProfilerMiddleware(object):
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.

    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.

    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:

    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request.
    - ``{time}`` - The time of the request.

    If it is a callable, it will be called with the WSGI ``environ``
    dict and should return a filename.

    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.

    .. code-block:: python

        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)

    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disabled by passing ``stream=None``.

    .. versionadded:: 0.15
        Added ``filename_format``.

    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """

    def __init__(
        self,
        app,
        stream=sys.stdout,
        sort_by=("time", "calls"),
        restrictions=(),
        profile_dir=None,
        filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
    ):
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format

    def __call__(self, environ, start_response):
        response_body = []

        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            return response_body.append

        def runapp():
            app_iter = self._app(environ, catching_start_response)
            response_body.extend(app_iter)

            if hasattr(app_iter, "close"):
                app_iter.close()

        profile = Profile()
        start = time.time()
        profile.runcall(runapp)
        body = b"".join(response_body)
        elapsed = time.time() - start

        if self._profile_dir is not None:
            if callable(self._filename_format):
                filename = self._filename_format(environ)
            else:
                filename = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=(
                        environ.get("PATH_INFO").strip("/").replace("/", ".") or "root"
                    ),
                    elapsed=elapsed * 1000.0,
                    time=time.time(),
                )
            filename = os.path.join(self._profile_dir, filename)
            profile.dump_stats(filename)

        if self._stream is not None:
            stats = Stats(profile, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream)
            stats.print_stats(*self._restrictions)
            print("-" * 80 + "\n", file=self._stream)

        return [body]
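A minimal configuration sketch; the wrapped app and the "./profiles" directory are placeholders:

    from werkzeug.middleware.profiler import ProfilerMiddleware

    app = ProfilerMiddleware(
        app,
        sort_by=("cumulative",),   # columns accepted by pstats.Stats.sort_stats
        restrictions=(10,),        # print only the 10 most expensive entries
        profile_dir="./profiles",  # additionally dump one .prof file per request
    )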
228
python/werkzeug/middleware/proxy_fix.py
Normal file
@@ -0,0 +1,228 @@
"""
X-Forwarded-For Proxy Fix
=========================

This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.

When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.

This middleware should only be applied if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.

.. autoclass:: ProxyFix

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import warnings


class ProxyFix(object):
    """Adjust the WSGI environ based on ``X-Forwarded-`` headers that
    proxies in front of the application may set.

    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.

    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.

    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.

    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.
    :param num_proxies: Deprecated, use ``x_for`` instead.

    .. code-block:: python

        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)

    .. versionchanged:: 0.15
        All headers support multiple values. The ``num_proxies``
        argument is deprecated. Each header is configured with a
        separate number of trusted proxies.

    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the
        ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
        ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
        and will be removed in 1.0.

    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.

    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """

    def __init__(
        self, app, num_proxies=None, x_for=1, x_proto=0, x_host=0, x_port=0, x_prefix=0
    ):
        self.app = app
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix
        self.num_proxies = num_proxies

    @property
    def num_proxies(self):
        """The number of proxies setting ``X-Forwarded-For`` in front
        of the application.

        .. deprecated:: 0.15
            A separate number of trusted proxies is configured for each
            header. ``num_proxies`` maps to ``x_for``. This method will
            be removed in 1.0.

        :internal:
        """
        warnings.warn(
            "'num_proxies' is deprecated as of version 0.15 and will be"
            " removed in version 1.0. Use 'x_for' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.x_for

    @num_proxies.setter
    def num_proxies(self, value):
        if value is not None:
            warnings.warn(
                "'num_proxies' is deprecated as of version 0.15 and"
                " will be removed in version 1.0. Use 'x_for' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            self.x_for = value

    def get_remote_addr(self, forwarded_for):
        """Get the real ``remote_addr`` by looking backwards ``x_for``
        number of values in the ``X-Forwarded-For`` header.

        :param forwarded_for: List of values parsed from the
            ``X-Forwarded-For`` header.
        :return: The real ``remote_addr``, or ``None`` if there were not
            at least ``x_for`` values.

        .. deprecated:: 0.15
            This is handled internally for each header. This method will
            be removed in 1.0.

        .. versionchanged:: 0.9
            Use ``num_proxies`` instead of always picking the first
            value.

        .. versionadded:: 0.8
        """
        warnings.warn(
            "'get_remote_addr' is deprecated as of version 0.15 and"
            " will be removed in version 1.0. It is now handled"
            " internally for each header.",
            DeprecationWarning,
        )
        return self._get_trusted_comma(self.x_for, ",".join(forwarded_for))

    def _get_trusted_comma(self, trusted, value):
        """Get the real value from a comma-separated header based on the
        configured number of trusted proxies.

        :param trusted: Number of values to trust in the header.
        :param value: Header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.

        .. versionadded:: 0.15
        """
        if not (trusted and value):
            return
        values = [x.strip() for x in value.split(",")]
        if len(values) >= trusted:
            return values[-trusted]

    def __call__(self, environ, start_response):
        """Modify the WSGI environ based on the various ``Forwarded``
        headers before calling the wrapped application. Store the
        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
        """
        environ_get = environ.get
        orig_remote_addr = environ_get("REMOTE_ADDR")
        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
        orig_http_host = environ_get("HTTP_HOST")
        environ.update(
            {
                "werkzeug.proxy_fix.orig": {
                    "REMOTE_ADDR": orig_remote_addr,
                    "wsgi.url_scheme": orig_wsgi_url_scheme,
                    "HTTP_HOST": orig_http_host,
                    "SERVER_NAME": environ_get("SERVER_NAME"),
                    "SERVER_PORT": environ_get("SERVER_PORT"),
                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
                },
                # todo: remove deprecated keys
                "werkzeug.proxy_fix.orig_remote_addr": orig_remote_addr,
                "werkzeug.proxy_fix.orig_wsgi_url_scheme": orig_wsgi_url_scheme,
                "werkzeug.proxy_fix.orig_http_host": orig_http_host,
            }
        )

        x_for = self._get_trusted_comma(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
        if x_for:
            environ["REMOTE_ADDR"] = x_for

        x_proto = self._get_trusted_comma(
            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
        )
        if x_proto:
            environ["wsgi.url_scheme"] = x_proto

        x_host = self._get_trusted_comma(
            self.x_host, environ_get("HTTP_X_FORWARDED_HOST")
        )
        if x_host:
            environ["HTTP_HOST"] = x_host
            parts = x_host.split(":", 1)
            environ["SERVER_NAME"] = parts[0]
            if len(parts) == 2:
                environ["SERVER_PORT"] = parts[1]

        x_port = self._get_trusted_comma(
            self.x_port, environ_get("HTTP_X_FORWARDED_PORT")
        )
        if x_port:
            host = environ.get("HTTP_HOST")
            if host:
                parts = host.split(":", 1)
                host = parts[0] if len(parts) == 2 else host
                environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
            environ["SERVER_PORT"] = x_port

        x_prefix = self._get_trusted_comma(
            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
        )
        if x_prefix:
            environ["SCRIPT_NAME"] = x_prefix

        return self.app(environ, start_response)
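A minimal trust-configuration sketch; the wrapped app is a placeholder:

    from werkzeug.middleware.proxy_fix import ProxyFix

    # One trusted proxy sets X-Forwarded-For, -Proto and -Host.
    app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)

    # With x_for=1 and an incoming header of
    #   X-Forwarded-For: spoofed-by-client, 203.0.113.7
    # only the right-most value (added by the trusted proxy) is used,
    # so REMOTE_ADDR becomes "203.0.113.7".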
260
python/werkzeug/middleware/shared_data.py
Normal file
@@ -0,0 +1,260 @@
"""
Serve Shared Static Files
=========================

.. autoclass:: SharedDataMiddleware
    :members: is_allowed

:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import mimetypes
import os
import posixpath
from datetime import datetime
from io import BytesIO
from time import mktime
from time import time
from zlib import adler32

from .._compat import PY2
from .._compat import string_types
from ..filesystem import get_filesystem_encoding
from ..http import http_date
from ..http import is_resource_modified
from ..wsgi import get_path_info
from ..wsgi import wrap_file


class SharedDataMiddleware(object):

    """A WSGI middleware that provides static content for development
    environments or simple server setups. Usage is quite simple::

        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware

        app = SharedDataMiddleware(app, {
            '/static': os.path.join(os.path.dirname(__file__), 'static')
        })

    The contents of the folder ``./static`` will now be available on
    ``http://example.com/static/``. This is pretty useful during development
    because a standalone media server is not required. One can also mount
    files on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.

    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::

        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })

    This will then serve the ``static`` folder in the `myapplication`
    Python package.

    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.

    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to be the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.

    The middleware will guess the mimetype using the Python `mimetypes`
    module. If it's unable to guess the mimetype it will fall back
    to `fallback_mimetype`.

    .. versionchanged:: 0.5
       The cache timeout is configurable now.

    .. versionadded:: 0.6
       The `fallback_mimetype` parameter was added.

    :param app: the application to wrap. If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param fallback_mimetype: the fallback mimetype for unknown files.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    """

    def __init__(
        self,
        app,
        exports,
        disallow=None,
        cache=True,
        cache_timeout=60 * 60 * 12,
        fallback_mimetype="text/plain",
    ):
        self.app = app
        self.exports = []
        self.cache = cache
        self.cache_timeout = cache_timeout

        if hasattr(exports, "items"):
            exports = exports.items()

        for key, value in exports:
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, string_types):
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError("unknown def %r" % value)

            self.exports.append((key, loader))

        if disallow is not None:
            from fnmatch import fnmatch

            self.is_allowed = lambda x: not fnmatch(x, disallow)

        self.fallback_mimetype = fallback_mimetype

    def is_allowed(self, filename):
        """Subclasses can override this method to disallow access to
        certain files. However, by providing `disallow` in the
        constructor this method is overwritten.
        """
        return True

    def _opener(self, filename):
        return lambda: (
            open(filename, "rb"),
            datetime.utcfromtimestamp(os.path.getmtime(filename)),
            int(os.path.getsize(filename)),
        )

    def get_file_loader(self, filename):
        return lambda x: (os.path.basename(filename), self._opener(filename))

    def get_package_loader(self, package, package_path):
        from pkg_resources import DefaultProvider, ResourceManager, get_provider

        loadtime = datetime.utcnow()
        provider = get_provider(package)
        manager = ResourceManager()
        filesystem_bound = isinstance(provider, DefaultProvider)

        def loader(path):
            if path is None:
                return None, None

            path = posixpath.join(package_path, path)

            if not provider.has_resource(path):
                return None, None

            basename = posixpath.basename(path)

            if filesystem_bound:
                return (
                    basename,
                    self._opener(provider.get_resource_filename(manager, path)),
                )

            s = provider.get_resource_string(manager, path)
            return basename, lambda: (BytesIO(s), loadtime, len(s))

        return loader

    def get_directory_loader(self, directory):
        def loader(path):
            if path is not None:
                path = os.path.join(directory, path)
            else:
                path = directory

            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)

            return None, None

        return loader

    def generate_etag(self, mtime, file_size, real_filename):
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(get_filesystem_encoding())

        return "wzsdm-%d-%s-%s" % (
            mktime(mtime.timetuple()),
            file_size,
            adler32(real_filename) & 0xFFFFFFFF,
        )

    def __call__(self, environ, start_response):
        cleaned_path = get_path_info(environ)

        if PY2:
            cleaned_path = cleaned_path.encode(get_filesystem_encoding())

        # sanitize the path for non-Unix systems
        cleaned_path = cleaned_path.strip("/")

        for sep in os.sep, os.altsep:
            if sep and sep != "/":
                cleaned_path = cleaned_path.replace(sep, "/")

        path = "/" + "/".join(x for x in cleaned_path.split("/") if x and x != "..")
        file_loader = None

        for search_path, loader in self.exports:
            if search_path == path:
                real_filename, file_loader = loader(None)

                if file_loader is not None:
                    break

            if not search_path.endswith("/"):
                search_path += "/"

            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])

                if file_loader is not None:
                    break

        if file_loader is None or not self.is_allowed(real_filename):
            return self.app(environ, start_response)

        guessed_type = mimetypes.guess_type(real_filename)
        mime_type = guessed_type[0] or self.fallback_mimetype
        f, mtime, file_size = file_loader()

        headers = [("Date", http_date())]

        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)
            headers += [
                ("Etag", '"%s"' % etag),
                ("Cache-Control", "max-age=%d, public" % timeout),
            ]

            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response("304 Not Modified", headers)
                return []

            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))

        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        return wrap_file(environ, f)
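A minimal configuration sketch; the wrapped app and the "myapplication" package with a bundled "static" directory are placeholders:

    from werkzeug.middleware.shared_data import SharedDataMiddleware

    app = SharedDataMiddleware(
        app,
        {"/static": ("myapplication", "static")},  # serve package data
        cache_timeout=3600,  # sends Cache-Control: max-age=3600, public
        fallback_mimetype="application/octet-stream",
    )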
117
python/werkzeug/posixemulation.py
Normal file
@@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
r"""
    werkzeug.posixemulation
    ~~~~~~~~~~~~~~~~~~~~~~~

    Provides a POSIX emulation for some features that are relevant to
    web applications. The main purpose is to simplify support for
    systems such as Windows NT that are not 100% POSIX compatible.

    Currently this only implements a :func:`rename` function that
    follows POSIX semantics, e.g. if the target file already exists it
    will be replaced without asking.

    This module was introduced in 0.6.1 and is not a public interface.
    It might become one in later versions of Werkzeug.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import errno
import os
import random
import sys
import time

from ._compat import to_unicode
from .filesystem import get_filesystem_encoding

can_rename_open_file = False

if os.name == "nt":
    try:
        import ctypes

        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW

        def _rename(src, dst):
            src = to_unicode(src, get_filesystem_encoding())
            dst = to_unicode(dst, get_filesystem_encoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(
                    src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH
                )
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv

        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True

        def _rename_atomic(src, dst):
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename")
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(
                        src,
                        dst,
                        None,
                        None,
                        _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH,
                        ta,
                    )
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                _CloseHandle(ta)

    except Exception:

        def _rename(src, dst):
            return False

        def _rename_atomic(src, dst):
            return False

    def rename(src, dst):
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            old = "%s-%08x" % (dst, random.randint(0, sys.maxsize))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass


else:
    rename = os.rename
    can_rename_open_file = True
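A minimal sketch of the POSIX-style rename; the file names are placeholders:

    from werkzeug.posixemulation import rename

    with open("settings.new", "w") as f:
        f.write("updated")

    # Replaces settings.cfg if it already exists, without raising. On POSIX
    # this is plain os.rename; on Windows it falls back to the retry or
    # move-away-and-replace strategies implemented above.
    rename("settings.new", "settings.cfg")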
2026
python/werkzeug/routing.py
Normal file
File diff suppressed because it is too large
241
python/werkzeug/security.py
Normal file
@@ -0,0 +1,241 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.security
    ~~~~~~~~~~~~~~~~~

    Security related helpers such as secure password hashing tools.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import codecs
import hashlib
import hmac
import os
import posixpath
from random import SystemRandom
from struct import Struct

from ._compat import izip
from ._compat import PY2
from ._compat import range_type
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_native

SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
DEFAULT_PBKDF2_ITERATIONS = 150000

_pack_int = Struct(">I").pack
_builtin_safe_str_cmp = getattr(hmac, "compare_digest", None)
_sys_rng = SystemRandom()
_os_alt_seps = list(
    sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/")
)


def pbkdf2_hex(
    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
    """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided,
                   the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
                     string name of a known hash function, or a function
                     from the hashlib module. Defaults to sha256.
    """
    rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    return to_native(codecs.encode(rv, "hex_codec"))


def pbkdf2_bin(
    data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None
):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-256 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
                     string name of a known hash function or a function
                     from the hashlib module. Defaults to sha256.
    """
    if not hashfunc:
        hashfunc = "sha256"

    data = to_bytes(data)
    salt = to_bytes(salt)

    if callable(hashfunc):
        _test_hash = hashfunc()
        hash_name = getattr(_test_hash, "name", None)
    else:
        hash_name = hashfunc
    return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)


def safe_str_cmp(a, b):
    """This function compares strings in somewhat constant time. This
    requires that the length of at least one string is known in advance.

    Returns `True` if the two strings are equal, or `False` if they are not.

    .. versionadded:: 0.7
    """
    if isinstance(a, text_type):
        a = a.encode("utf-8")
    if isinstance(b, text_type):
        b = b.encode("utf-8")

    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(a, b)

    if len(a) != len(b):
        return False

    rv = 0
    if PY2:
        for x, y in izip(a, b):
            rv |= ord(x) ^ ord(y)
    else:
        for x, y in izip(a, b):
            rv |= x ^ y

    return rv == 0


def gen_salt(length):
    """Generate a random string of SALT_CHARS with specified ``length``."""
    if length <= 0:
        raise ValueError("Salt length must be positive")
    return "".join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))


def _hash_internal(method, salt, password):
    """Internal password hash helper. Supports plaintext without salt,
    unsalted and salted passwords. In case salted passwords are used
    hmac is used.
    """
    if method == "plain":
        return password, method

    if isinstance(password, text_type):
        password = password.encode("utf-8")

    if method.startswith("pbkdf2:"):
        args = method[7:].split(":")
        if len(args) not in (1, 2):
            raise ValueError("Invalid number of arguments for PBKDF2")
        method = args.pop(0)
        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
        is_pbkdf2 = True
        actual_method = "pbkdf2:%s:%d" % (method, iterations)
    else:
        is_pbkdf2 = False
        actual_method = method

    if is_pbkdf2:
        if not salt:
            raise ValueError("Salt is required for PBKDF2")
        rv = pbkdf2_hex(password, salt, iterations, hashfunc=method)
    elif salt:
        if isinstance(salt, text_type):
            salt = salt.encode("utf-8")
        mac = _create_mac(salt, password, method)
        rv = mac.hexdigest()
    else:
        rv = hashlib.new(method, password).hexdigest()
    return rv, actual_method


def _create_mac(key, msg, method):
    if callable(method):
        return hmac.HMAC(key, msg, method)

    def hashfunc(d=b""):
        return hashlib.new(method, d)

    # Python 2.7 used ``hasattr(digestmod, '__call__')``
    # to detect if hashfunc is callable
    hashfunc.__call__ = hashfunc
    return hmac.HMAC(key, msg, hashfunc)


def generate_password_hash(password, method="pbkdf2:sha256", salt_length=8):
    """Hash a password with the given method and salt with a string of
    the given length. The format of the string returned includes the method
    that was used so that :func:`check_password_hash` can check the hash.

    The format for the hashed string looks like this::

        method$salt$hash

    This method can **not** generate unsalted passwords but it is possible
    to set ``method='plain'`` in order to enforce plaintext passwords.
    If a salt is used, hmac is used internally to salt the password.

    If PBKDF2 is wanted it can be enabled by setting the method to
    ``pbkdf2:method:iterations`` where iterations is optional::

        pbkdf2:sha256:80000$salt$hash
        pbkdf2:sha256$salt$hash

    :param password: the password to hash.
    :param method: the hash method to use (one that hashlib supports). Can
                   optionally be in the format ``pbkdf2:<method>[:iterations]``
                   to enable PBKDF2.
    :param salt_length: the length of the salt in letters.
    """
    salt = gen_salt(salt_length) if method != "plain" else ""
    h, actual_method = _hash_internal(method, salt, password)
    return "%s$%s$%s" % (actual_method, salt, h)


def check_password_hash(pwhash, password):
    """Check a password against a given salted and hashed password value.
    In order to support unsalted legacy passwords this method supports
    plain text passwords, md5 and sha1 hashes (both salted and unsalted).

    Returns `True` if the password matched, `False` otherwise.

    :param pwhash: a hashed string like returned by
                   :func:`generate_password_hash`.
    :param password: the plaintext password to compare against the hash.
    """
    if pwhash.count("$") < 2:
        return False
    method, salt, hashval = pwhash.split("$", 2)
    return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)


def safe_join(directory, *pathnames):
    """Safely join `directory` and one or more untrusted `pathnames`. If this
    cannot be done, this function returns ``None``.

    :param directory: the base directory.
    :param pathnames: the untrusted pathnames relative to that directory.
    """
    parts = [directory]
    for filename in pathnames:
        if filename != "":
            filename = posixpath.normpath(filename)
        for sep in _os_alt_seps:
            if sep in filename:
                return None
        if os.path.isabs(filename) or filename == ".." or filename.startswith("../"):
            return None
        parts.append(filename)
    return posixpath.join(*parts)
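A minimal sketch of the password-hashing round trip and of safe_join; the password and paths are illustrative:

    from werkzeug.security import (
        check_password_hash,
        generate_password_hash,
        safe_join,
    )

    pwhash = generate_password_hash("correct horse", method="pbkdf2:sha256:150000")
    # pwhash looks like "pbkdf2:sha256:150000$<salt>$<hexdigest>"
    assert check_password_hash(pwhash, "correct horse")
    assert not check_password_hash(pwhash, "wrong horse")

    assert safe_join("/var/www", "static/app.css") == "/var/www/static/app.css"
    assert safe_join("/var/www", "../etc/passwd") is None  # traversal rejected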
1074
python/werkzeug/serving.py
Normal file
File diff suppressed because it is too large
1146
python/werkzeug/test.py
Normal file
File diff suppressed because it is too large
241
python/werkzeug/testapp.py
Normal file
@@ -0,0 +1,241 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.testapp
    ~~~~~~~~~~~~~~~~

    Provide a small test application that can be used to test a WSGI server
    and check it for WSGI compliance.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import base64
import os
import sys
from textwrap import wrap

import werkzeug
from .utils import escape
from .wrappers import BaseRequest as Request
from .wrappers import BaseResponse as Response

logo = Response(
    base64.b64decode(
        """
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
="""
    ),
    mimetype="image/png",
)


TEMPLATE = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
  "http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
  @import url(https://fonts.googleapis.com/css?family=Ubuntu);

  body       { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
               'Verdana', sans-serif; background-color: white; color: #000;
               font-size: 15px; text-align: center; }
  #logo      { float: right; padding: 0 0 10px 10px; }
  div.box    { text-align: left; width: 45em; margin: auto; padding: 50px 0;
               background-color: white; }
  h1, h2     { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
               'Geneva', 'Verdana', sans-serif; font-weight: normal; }
  h1         { margin: 0 0 30px 0; }
  h2         { font-size: 1.4em; margin: 1em 0 0.5em 0; }
  table      { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
  table th   { background-color: #AFC1C4; color: white; font-size: 0.72em;
               font-weight: normal; width: 18em; vertical-align: top;
               padding: 0.5em 0 0.1em 0.5em; }
  table td   { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
  code       { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
               monospace; font-size: 0.7em; }
  ul li      { line-height: 1.5em; }
  ul.path    { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
               list-style: none; background: #E8EFF0; }
  ul.path li { line-height: 1.6em; }
  li.virtual { color: #999; text-decoration: underline; }
  li.exp     { background: white; }
</style>
<div class="box">
  <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
  <h1>WSGI Information</h1>
  <p>
    This page displays all available information about the WSGI server and
    the underlying Python interpreter.
  <h2 id="python-interpreter">Python Interpreter</h2>
  <table>
    <tr>
      <th>Python Version
      <td>%(python_version)s
    <tr>
      <th>Platform
      <td>%(platform)s [%(os)s]
    <tr>
      <th>API Version
      <td>%(api_version)s
    <tr>
      <th>Byteorder
      <td>%(byteorder)s
    <tr>
      <th>Werkzeug Version
      <td>%(werkzeug_version)s
  </table>
  <h2 id="wsgi-environment">WSGI Environment</h2>
  <table>%(wsgi_env)s</table>
  <h2 id="installed-eggs">Installed Eggs</h2>
  <p>
    The following python packages were installed on the system as
    Python eggs:
  <ul>%(python_eggs)s</ul>
  <h2 id="sys-path">System Path</h2>
  <p>
    The following paths are the current contents of the load path. The
    following entries are looked up for Python packages. Note that not
    all items in this path are folders. Gray and underlined items are
    entries pointing to invalid resources or used by custom import hooks
    such as the zip importer.
  <p>
    Items with a bright background were expanded for display from a relative
    path. If you encounter such paths in the output you might want to check
    your setup as relative paths are usually problematic in multithreaded
    environments.
  <ul class="path">%(sys_path)s</ul>
</div>
"""


def iter_sys_path():
    if os.name == "posix":

        def strip(x):
            prefix = os.path.expanduser("~")
            if x.startswith(prefix):
                x = "~" + x[len(prefix) :]
            return x

    else:

        def strip(x):
            return x

    cwd = os.path.abspath(os.getcwd())
    for item in sys.path:
        path = os.path.join(cwd, item or os.path.curdir)
        yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item


def render_testapp(req):
    try:
        import pkg_resources
    except ImportError:
        eggs = ()
    else:
        eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower())
    python_eggs = []
    for egg in eggs:
        try:
            version = egg.version
        except (ValueError, AttributeError):
            version = "unknown"
        python_eggs.append(
            "<li>%s <small>[%s]</small>" % (escape(egg.project_name), escape(version))
        )

    wsgi_env = []
    sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
    for key, value in sorted_environ:
        wsgi_env.append(
            "<tr><th>%s<td><code>%s</code>"
            % (escape(str(key)), " ".join(wrap(escape(repr(value)))))
        )

    sys_path = []
    for item, virtual, expanded in iter_sys_path():
        class_ = []
        if virtual:
            class_.append("virtual")
        if expanded:
            class_.append("exp")
        sys_path.append(
            "<li%s>%s"
            % (' class="%s"' % " ".join(class_) if class_ else "", escape(item))
        )

    return (
        TEMPLATE
        % {
            "python_version": "<br>".join(escape(sys.version).splitlines()),
            "platform": escape(sys.platform),
            "os": escape(os.name),
            "api_version": sys.api_version,
            "byteorder": sys.byteorder,
            "werkzeug_version": werkzeug.__version__,
            "python_eggs": "\n".join(python_eggs),
            "wsgi_env": "\n".join(wsgi_env),
            "sys_path": "\n".join(sys_path),
        }
    ).encode("utf-8")


def test_app(environ, start_response):
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    req = Request(environ, populate_request=False)
    if req.args.get("resource") == "logo":
        response = logo
    else:
        response = Response(render_testapp(req), mimetype="text/html")
    return response(environ, start_response)


if __name__ == "__main__":
    from .serving import run_simple

    run_simple("localhost", 5000, test_app, use_reloader=True)
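A minimal sketch of exercising test_app without starting a server, using the test client; the assertions are illustrative:

    from werkzeug.test import Client
    from werkzeug.testapp import test_app
    from werkzeug.wrappers import BaseResponse

    client = Client(test_app, BaseResponse)
    resp = client.get("/")
    assert resp.status_code == 200
    assert b"WSGI Information" in resp.data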
1134
python/werkzeug/urls.py
Normal file
File diff suppressed because it is too large
220
python/werkzeug/useragents.py
Normal file
@@ -0,0 +1,220 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.useragents
    ~~~~~~~~~~~~~~~~~~~

    This module provides a helper to inspect user agent strings. This module
    is far from complete but should work for most of the currently available
    browsers.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import re
import warnings


class UserAgentParser(object):
    """A simple user agent parser. Used by the `UserAgent`."""

    platforms = (
        ("cros", "chromeos"),
        ("iphone|ios", "iphone"),
        ("ipad", "ipad"),
        (r"darwin|mac|os\s*x", "macos"),
        ("win", "windows"),
        (r"android", "android"),
        ("netbsd", "netbsd"),
        ("openbsd", "openbsd"),
        ("freebsd", "freebsd"),
        ("dragonfly", "dragonflybsd"),
        ("(sun|i86)os", "solaris"),
        (r"x11|lin(\b|ux)?", "linux"),
        (r"nintendo\s+wii", "wii"),
        ("irix", "irix"),
        ("hp-?ux", "hpux"),
        ("aix", "aix"),
        ("sco|unix_sv", "sco"),
        ("bsd", "bsd"),
        ("amiga", "amiga"),
        ("blackberry|playbook", "blackberry"),
        ("symbian", "symbian"),
    )
    browsers = (
        ("googlebot", "google"),
        ("msnbot", "msn"),
        ("yahoo", "yahoo"),
        ("ask jeeves", "ask"),
        (r"aol|america\s+online\s+browser", "aol"),
        ("opera", "opera"),
        ("edge", "edge"),
        ("chrome|crios", "chrome"),
        ("seamonkey", "seamonkey"),
        ("firefox|firebird|phoenix|iceweasel", "firefox"),
        ("galeon", "galeon"),
        ("safari|version", "safari"),
        ("webkit", "webkit"),
        ("camino", "camino"),
        ("konqueror", "konqueror"),
        ("k-meleon", "kmeleon"),
        ("netscape", "netscape"),
        (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
        ("lynx", "lynx"),
        ("links", "links"),
        ("Baiduspider", "baidu"),
        ("bingbot", "bing"),
        ("mozilla", "mozilla"),
    )

    _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
    _language_re = re.compile(
        r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
        r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
    )

    def __init__(self):
        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
        self.browsers = [
            (b, re.compile(self._browser_version_re % a, re.I))
            for a, b in self.browsers
        ]

def __call__(self, user_agent):
|
||||
for platform, regex in self.platforms: # noqa: B007
|
||||
match = regex.search(user_agent)
|
||||
if match is not None:
|
||||
break
|
||||
else:
|
||||
platform = None
|
||||
for browser, regex in self.browsers: # noqa: B007
|
||||
match = regex.search(user_agent)
|
||||
if match is not None:
|
||||
version = match.group(1)
|
||||
break
|
||||
else:
|
||||
browser = version = None
|
||||
match = self._language_re.search(user_agent)
|
||||
if match is not None:
|
||||
language = match.group(1) or match.group(2)
|
||||
else:
|
||||
language = None
|
||||
return platform, browser, version, language
|
||||
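
# Editor's sketch: using the parser directly. The UA string is illustrative;
# the tuple contents follow from the regexes defined above.
#
#     parser = UserAgentParser()
#     ua = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/74.0 Safari/537.36"
#     parser(ua)  # -> ("linux", "chrome", "74.0", None)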

class UserAgent(object):
    """Represents a user agent. Pass it a WSGI environment or a user agent
    string and you can inspect some of the details from the user agent
    string via the attributes. The following attributes exist:

    .. attribute:: string

       the raw user agent string

    .. attribute:: platform

       the browser platform. The following platforms are currently
       recognized:

       - `aix`
       - `amiga`
       - `android`
       - `blackberry`
       - `bsd`
       - `chromeos`
       - `dragonflybsd`
       - `freebsd`
       - `hpux`
       - `ipad`
       - `iphone`
       - `irix`
       - `linux`
       - `macos`
       - `netbsd`
       - `openbsd`
       - `sco`
       - `solaris`
       - `symbian`
       - `wii`
       - `windows`

    .. attribute:: browser

       the name of the browser. The following browsers are currently
       recognized:

       - `aol` *
       - `ask` *
       - `baidu` *
       - `bing` *
       - `camino`
       - `chrome`
       - `edge`
       - `firefox`
       - `galeon`
       - `google` *
       - `kmeleon`
       - `konqueror`
       - `links`
       - `lynx`
       - `mozilla`
       - `msie`
       - `msn`
       - `netscape`
       - `opera`
       - `safari`
       - `seamonkey`
       - `webkit`
       - `yahoo` *

       (Browsers marked with a star (``*``) are crawlers.)

    .. attribute:: version

       the version of the browser

    .. attribute:: language

       the language of the browser
    """

    _parser = UserAgentParser()

    def __init__(self, environ_or_string):
        if isinstance(environ_or_string, dict):
            environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "")
        self.string = environ_or_string
        self.platform, self.browser, self.version, self.language = self._parser(
            environ_or_string
        )

    def to_header(self):
        return self.string

    def __str__(self):
        return self.string

    def __nonzero__(self):
        return bool(self.browser)

    __bool__ = __nonzero__

    def __repr__(self):
        return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version)
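
# Editor's sketch: UserAgent wraps the parser above and accepts either a
# WSGI environ or a raw string.
#
#     ua = UserAgent("Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 Firefox/66.0")
#     (ua.platform, ua.browser, ua.version)  # -> ("windows", "firefox", "66.0")
#     bool(UserAgent(""))                    # -> False, no browser recognized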

# DEPRECATED
from .wrappers import UserAgentMixin as _UserAgentMixin


class UserAgentMixin(_UserAgentMixin):
    @property
    def user_agent(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.useragents.UserAgentMixin' should be imported"
            " from 'werkzeug.wrappers.UserAgentMixin'. This old import"
            " will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        return super(_UserAgentMixin, self).user_agent
836
python/werkzeug/utils.py
Normal file
@@ -0,0 +1,836 @@
# -*- coding: utf-8 -*-
"""
    werkzeug.utils
    ~~~~~~~~~~~~~~

    This module implements various utilities for WSGI applications. Most of
    them are used by the request and response wrappers, but it also makes
    sense to use them without the wrappers, especially for middleware
    development.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
import codecs
import os
import pkgutil
import re
import sys
import warnings

from ._compat import iteritems
from ._compat import PY2
from ._compat import reraise
from ._compat import string_types
from ._compat import text_type
from ._compat import unichr
from ._internal import _DictAccessorProperty
from ._internal import _missing
from ._internal import _parse_signature

try:
    from html.entities import name2codepoint
except ImportError:
    from htmlentitydefs import name2codepoint


_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2))
_entity_re = re.compile(r"&([^;]+);")
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
_windows_device_files = (
    "CON",
    "AUX",
    "COM1",
    "COM2",
    "COM3",
    "COM4",
    "LPT1",
    "LPT2",
    "LPT3",
    "PRN",
    "NUL",
)

class cached_property(property):
    """A decorator that converts a function into a lazy property. The
    function wrapped is called the first time to retrieve the result
    and then that calculated result is used the next time you access
    the value::

        class Foo(object):

            @cached_property
            def foo(self):
                # calculate something important here
                return 42

    The class has to have a `__dict__` in order for this property to
    work.
    """

    # implementation detail: A subclass of python's builtin property
    # decorator, we override __get__ to check for a cached value. If one
    # chooses to invoke __get__ by hand the property will still work as
    # expected because the lookup logic is replicated in __get__ for
    # manual invocation.

    def __init__(self, func, name=None, doc=None):
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func

    def __set__(self, obj, value):
        obj.__dict__[self.__name__] = value

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value
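
# Editor's sketch: the wrapped function runs once per instance; the result
# is then stored in the instance __dict__ and shadows the descriptor.
#
#     class Config(object):
#         @cached_property
#         def settings(self):
#             print("loading...")  # executed on first access only
#             return {"debug": True}
#
#     c = Config()
#     c.settings  # prints "loading...", returns the dict
#     c.settings  # served from c.__dict__, no print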

class environ_property(_DictAccessorProperty):
    """Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
    environ attribute:

    >>> class Test(object):
    ...     environ = {'key': 'value'}
    ...     test = environ_property('key')
    >>> var = Test()
    >>> var.test
    'value'

    If you pass it a second value it's used as the default if the key does
    not exist. The third one can be a converter that takes a value and
    converts it. If it raises :exc:`ValueError` or :exc:`TypeError` the
    default value is used. If no default value is provided `None` is used.

    Per default the property is read only. You have to explicitly enable it
    by passing ``read_only=False`` to the constructor.
    """

    read_only = True

    def lookup(self, obj):
        return obj.environ


class header_property(_DictAccessorProperty):
    """Like `environ_property` but for headers."""

    def lookup(self, obj):
        return obj.headers

class HTMLBuilder(object):
    """Helper object for HTML generation.

    Per default there are two instances of that class. The `html` one, and
    the `xhtml` one for those two dialects. The class uses keyword parameters
    and positional parameters to generate small snippets of HTML.

    Keyword parameters are converted to XML/SGML attributes, positional
    arguments are used as children. Because Python accepts positional
    arguments before keyword arguments it's a good idea to use a list with the
    star-syntax for some children:

    >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
    ...                        html.a('bar', href='bar.html')])
    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'

    This class works around some browser limitations and can not be used for
    arbitrary SGML/XML generation. For that purpose lxml and similar
    libraries exist.

    Calling the builder escapes the string passed:

    >>> html.p(html("<foo>"))
    u'<p>&lt;foo&gt;</p>'
    """

    _entity_re = re.compile(r"&([^;]+);")
    _entities = name2codepoint.copy()
    _entities["apos"] = 39
    _empty_elements = {
        "area",
        "base",
        "basefont",
        "br",
        "col",
        "command",
        "embed",
        "frame",
        "hr",
        "img",
        "input",
        "keygen",
        "isindex",
        "link",
        "meta",
        "param",
        "source",
        "wbr",
    }
    _boolean_attributes = {
        "selected",
        "checked",
        "compact",
        "declare",
        "defer",
        "disabled",
        "ismap",
        "multiple",
        "nohref",
        "noresize",
        "noshade",
        "nowrap",
    }
    _plaintext_elements = {"textarea"}
    _c_like_cdata = {"script", "style"}

    def __init__(self, dialect):
        self._dialect = dialect

    def __call__(self, s):
        return escape(s)

    def __getattr__(self, tag):
        if tag[:2] == "__":
            raise AttributeError(tag)

        def proxy(*children, **arguments):
            buffer = "<" + tag
            for key, value in iteritems(arguments):
                if value is None:
                    continue
                if key[-1] == "_":
                    key = key[:-1]
                if key in self._boolean_attributes:
                    if not value:
                        continue
                    if self._dialect == "xhtml":
                        value = '="' + key + '"'
                    else:
                        value = ""
                else:
                    value = '="' + escape(value) + '"'
                buffer += " " + key + value
            if not children and tag in self._empty_elements:
                if self._dialect == "xhtml":
                    buffer += " />"
                else:
                    buffer += ">"
                return buffer
            buffer += ">"

            children_as_string = "".join(
                [text_type(x) for x in children if x is not None]
            )

            if children_as_string:
                if tag in self._plaintext_elements:
                    children_as_string = escape(children_as_string)
                elif tag in self._c_like_cdata and self._dialect == "xhtml":
                    children_as_string = (
                        "/*<![CDATA[*/" + children_as_string + "/*]]>*/"
                    )
            buffer += children_as_string + "</" + tag + ">"
            return buffer

        return proxy

    def __repr__(self):
        return "<%s for %r>" % (self.__class__.__name__, self._dialect)

html = HTMLBuilder("html")
xhtml = HTMLBuilder("xhtml")
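
# Editor's sketch for the two builders above: the `html` dialect leaves
# void elements open while `xhtml` self-closes them.
#
#     html.br()   # -> u'<br>'
#     xhtml.br()  # -> u'<br />'
#     html.a("docs", href="/docs", class_="nav")
#     # -> u'<a href="/docs" class="nav">docs</a>' (attribute order may vary)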

# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA registration.
_charset_mimetypes = {
    "application/ecmascript",
    "application/javascript",
    "application/sql",
    "application/xml",
    "application/xml-dtd",
    "application/xml-external-parsed-entity",
}

def get_content_type(mimetype, charset):
    """Returns the full content type string with charset for a mimetype.

    If the mimetype represents text, the charset parameter will be
    appended, otherwise the mimetype is returned unchanged.

    :param mimetype: The mimetype to be used as content type.
    :param charset: The charset to be appended for text mimetypes.
    :return: The content type.

    .. versionchanged:: 0.15
        Any type that ends with ``+xml`` gets a charset, not just those
        that start with ``application/``. Known text types such as
        ``application/javascript`` are also given charsets.
    """
    if (
        mimetype.startswith("text/")
        or mimetype in _charset_mimetypes
        or mimetype.endswith("+xml")
    ):
        mimetype += "; charset=" + charset

    return mimetype
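
# Editor's sketch: text types, the registered types above, and +xml types
# get the charset appended; everything else passes through unchanged.
#
#     get_content_type("text/html", "utf-8")      # 'text/html; charset=utf-8'
#     get_content_type("image/svg+xml", "utf-8")  # 'image/svg+xml; charset=utf-8'
#     get_content_type("image/png", "utf-8")      # 'image/png'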

def detect_utf_encoding(data):
    """Detect which UTF encoding was used to encode the given bytes.

    The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
    accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
    or little endian. Some editors or libraries may prepend a BOM.

    :internal:

    :param data: Bytes in unknown UTF encoding.
    :return: UTF encoding name

    .. versionadded:: 0.15
    """
    head = data[:4]

    if head[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"

    if b"\x00" not in head:
        return "utf-8"

    if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
        return "utf-32"

    if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
        return "utf-16"

    if len(head) == 4:
        if head[:3] == b"\x00\x00\x00":
            return "utf-32-be"

        if head[::2] == b"\x00\x00":
            return "utf-16-be"

        if head[1:] == b"\x00\x00\x00":
            return "utf-32-le"

        if head[1::2] == b"\x00\x00":
            return "utf-16-le"

    if len(head) == 2:
        return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le"

    return "utf-8"
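
# Editor's sketch: BOMs and the position of zero bytes drive the detection.
#
#     detect_utf_encoding(u'{"a": 1}'.encode("utf-8"))      # 'utf-8'
#     detect_utf_encoding(u'{"a": 1}'.encode("utf-16-le"))  # 'utf-16-le'
#     detect_utf_encoding(codecs.BOM_UTF8 + b"{}")          # 'utf-8-sig'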

def format_string(string, context):
    """String-template format a string:

    >>> format_string('$foo and ${foo}s', dict(foo=42))
    '42 and 42s'

    This does not do any attribute lookup etc. For more advanced string
    formatting have a look at the `werkzeug.template` module.

    :param string: the format string.
    :param context: a dict with the variables to insert.
    """

    def lookup_arg(match):
        x = context[match.group(1) or match.group(2)]
        if not isinstance(x, string_types):
            x = type(string)(x)
        return x

    return _format_re.sub(lookup_arg, string)

def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`. The filename returned is an ASCII only string
    for maximum portability.

    On Windows systems the function also makes sure that the file is not
    named after one of the special device files.

    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'

    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.

    .. versionadded:: 0.5

    :param filename: the filename to secure
    """
    if isinstance(filename, text_type):
        from unicodedata import normalize

        filename = normalize("NFKD", filename).encode("ascii", "ignore")
        if not PY2:
            filename = filename.decode("ascii")
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
        "._"
    )

    # on nt a couple of special files are present in each folder. We
    # have to ensure that the target file is not such a filename. In
    # this case we prepend an underscore
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = "_" + filename

    return filename

def escape(s, quote=None):
    """Replace special characters "&", "<", ">" and (") to HTML-safe sequences.

    There is a special handling for `None` which escapes to an empty string.

    .. versionchanged:: 0.9
       `quote` is now implicitly on.

    :param s: the string to escape.
    :param quote: ignored.
    """
    if s is None:
        return ""
    elif hasattr(s, "__html__"):
        return text_type(s.__html__())
    elif not isinstance(s, string_types):
        s = text_type(s)
    if quote is not None:
        from warnings import warn

        warn(
            "The 'quote' parameter is no longer used as of version 0.9"
            " and will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
    s = (
        s.replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace('"', "&quot;")
    )
    return s

def unescape(s):
    """The reverse function of `escape`. This unescapes all the HTML
    entities, not only the XML entities inserted by `escape`.

    :param s: the string to unescape.
    """

    def handle_match(m):
        name = m.group(1)
        if name in HTMLBuilder._entities:
            return unichr(HTMLBuilder._entities[name])
        try:
            if name[:2] in ("#x", "#X"):
                return unichr(int(name[2:], 16))
            elif name.startswith("#"):
                return unichr(int(name[1:]))
        except ValueError:
            pass
        return u""

    return _entity_re.sub(handle_match, s)
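
# Editor's sketch: escape and unescape round-trip, and unescape also
# resolves named and numeric entities that escape itself never emits.
#
#     escape('<a href="#">')            # u'&lt;a href=&quot;#&quot;&gt;'
#     unescape(escape('<a href="#">'))  # u'<a href="#">'
#     unescape("&copy; &#169;")         # u'\xa9 \xa9'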

def redirect(location, code=302, Response=None):
    """Returns a response object (a WSGI application) that, if called,
    redirects the client to the target location. Supported codes are
    301, 302, 303, 305, 307, and 308. 300 is not supported because
    it's not a real redirect and 304 because it's the answer for a
    request with defined If-Modified-Since headers.

    .. versionadded:: 0.6
       The location can now be a unicode string that is encoded using
       the :func:`iri_to_uri` function.

    .. versionadded:: 0.10
        The class used for the Response object can now be passed in.

    :param location: the location the response should redirect to.
    :param code: the redirect status code. defaults to 302.
    :param class Response: a Response class to use when instantiating a
        response. The default is :class:`werkzeug.wrappers.Response` if
        unspecified.
    """
    if Response is None:
        from .wrappers import Response

    display_location = escape(location)
    if isinstance(location, text_type):
        # Safe conversion is necessary here as we might redirect
        # to a broken URI scheme (for instance itms-services).
        from .urls import iri_to_uri

        location = iri_to_uri(location, safe_conversion=True)
    response = Response(
        '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
        "<title>Redirecting...</title>\n"
        "<h1>Redirecting...</h1>\n"
        "<p>You should be redirected automatically to target URL: "
        '<a href="%s">%s</a>. If not click the link.'
        % (escape(location), display_location),
        code,
        mimetype="text/html",
    )
    response.headers["Location"] = location
    return response
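
# Editor's sketch: the returned response is itself a WSGI application,
# so it can be returned directly from a WSGI callable.
#
#     def app(environ, start_response):
#         response = redirect("https://example.com/login", code=303)
#         return response(environ, start_response)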

def append_slash_redirect(environ, code=301):
    """Redirects to the same URL but with a slash appended. The behavior
    of this function is undefined if the path ends with a slash already.

    :param environ: the WSGI environment for the request that triggers
                    the redirect.
    :param code: the status code for the redirect.
    """
    new_path = environ["PATH_INFO"].strip("/") + "/"
    query_string = environ.get("QUERY_STRING")
    if query_string:
        new_path += "?" + query_string
    return redirect(new_path, code)

def import_string(import_name, silent=False):
    """Imports an object based on a string. This is useful if you want to
    use import paths as endpoints or something similar. An import path can
    be specified either in dotted notation (``xml.sax.saxutils.escape``)
    or with a colon as object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import fails.

    :param import_name: the dotted name for the object to import.
    :param silent: if set to `True` import errors are ignored and
                   `None` is returned instead.
    :return: imported object
    """
    # force the import name to automatically convert to strings
    # __import__ is not able to handle unicode strings in the fromlist
    # if the module is a package
    import_name = str(import_name).replace(":", ".")
    try:
        try:
            __import__(import_name)
        except ImportError:
            if "." not in import_name:
                raise
        else:
            return sys.modules[import_name]

        module_name, obj_name = import_name.rsplit(".", 1)
        module = __import__(module_name, globals(), locals(), [obj_name])
        try:
            return getattr(module, obj_name)
        except AttributeError as e:
            raise ImportError(e)

    except ImportError as e:
        if not silent:
            reraise(
                ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2]
            )
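
# Editor's sketch: both delimiter styles resolve to the same object, and
# `silent` swallows the failure.
#
#     import_string("os.path.join") is import_string("os.path:join")  # True
#     import_string("no.such.module", silent=True)                    # None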

def find_modules(import_path, include_packages=False, recursive=False):
    """Finds all the modules below a package. This can be useful to
    automatically import all views / controllers so that their metaclasses /
    function decorators have a chance to register themselves on the
    application.

    Packages are not returned unless `include_packages` is `True`. This can
    also recursively list modules but in that case it will import all the
    packages to get the correct load path of that module.

    :param import_path: the dotted name for the package to find child modules.
    :param include_packages: set to `True` if packages should be returned, too.
    :param recursive: set to `True` if recursion should happen.
    :return: generator
    """
    module = import_string(import_path)
    path = getattr(module, "__path__", None)
    if path is None:
        raise ValueError("%r is not a package" % import_path)
    basename = module.__name__ + "."
    for _importer, modname, ispkg in pkgutil.iter_modules(path):
        modname = basename + modname
        if ispkg:
            if include_packages:
                yield modname
            if recursive:
                for item in find_modules(modname, include_packages, True):
                    yield item
        else:
            yield modname

def validate_arguments(func, args, kwargs, drop_extra=True):
    """Checks if the function accepts the arguments and keyword arguments.
    Returns a new ``(args, kwargs)`` tuple that can safely be passed to
    the function without causing a `TypeError` because the function signature
    is incompatible. If `drop_extra` is set to `True` (which is the default)
    any extra positional or keyword arguments are dropped automatically.

    The exception raised provides three attributes:

    `missing`
        A set of argument names that the function expected but were
        missing.

    `extra`
        A dict of keyword arguments that the function can not handle but
        were provided.

    `extra_positional`
        A list of values that were given by positional argument but the
        function cannot accept.

    This can be useful for decorators that forward user submitted data to
    a view function::

        from werkzeug.utils import ArgumentValidationError, validate_arguments

        def sanitize(f):
            def proxy(request):
                data = request.values.to_dict()
                try:
                    args, kwargs = validate_arguments(f, (request,), data)
                except ArgumentValidationError:
                    raise BadRequest('The browser failed to transmit all '
                                     'the data expected.')
                return f(*args, **kwargs)
            return proxy

    :param func: the function the validation is performed against.
    :param args: a tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :param drop_extra: set to `False` if you don't want extra arguments
                       to be silently dropped.
    :return: tuple in the form ``(args, kwargs)``.
    """
    parser = _parse_signature(func)
    args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
    if missing:
        raise ArgumentValidationError(tuple(missing))
    elif (extra or extra_positional) and not drop_extra:
        raise ArgumentValidationError(None, extra, extra_positional)
    return tuple(args), kwargs

def bind_arguments(func, args, kwargs):
    """Bind the arguments provided into a dict. When passed a function,
    a tuple of arguments and a dict of keyword arguments `bind_arguments`
    returns a dict of names as the function would see it. This can be useful
    to implement a cache decorator that uses the function arguments to build
    the cache key based on the values of the arguments.

    :param func: the function the arguments should be bound for.
    :param args: tuple of positional arguments.
    :param kwargs: a dict of keyword arguments.
    :return: a :class:`dict` of bound keyword arguments.
    """
    (
        args,
        kwargs,
        missing,
        extra,
        extra_positional,
        arg_spec,
        vararg_var,
        kwarg_var,
    ) = _parse_signature(func)(args, kwargs)
    values = {}
    for (name, _has_default, _default), value in zip(arg_spec, args):
        values[name] = value
    if vararg_var is not None:
        values[vararg_var] = tuple(extra_positional)
    elif extra_positional:
        raise TypeError("too many positional arguments")
    if kwarg_var is not None:
        multikw = set(extra) & set([x[0] for x in arg_spec])
        if multikw:
            raise TypeError(
                "got multiple values for keyword argument " + repr(next(iter(multikw)))
            )
        values[kwarg_var] = extra
    elif extra:
        raise TypeError("got unexpected keyword argument " + repr(next(iter(extra))))
    return values
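
# Editor's sketch: bind_arguments maps call arguments to the names the
# function body would see. The exact treatment of untouched defaults
# follows from _parse_signature, so only the explicit values are shown.
#
#     def target(a, b=2, **opts):
#         pass
#
#     values = bind_arguments(target, (1,), {"x": 9})
#     values["a"]     # -> 1
#     values["opts"]  # -> {"x": 9}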

class ArgumentValidationError(ValueError):

    """Raised if :func:`validate_arguments` fails to validate"""

    def __init__(self, missing=None, extra=None, extra_positional=None):
        self.missing = set(missing or ())
        self.extra = extra or {}
        self.extra_positional = extra_positional or []
        ValueError.__init__(
            self,
            "function arguments invalid. (%d missing, %d additional)"
            % (len(self.missing), len(self.extra) + len(self.extra_positional)),
        )

class ImportStringError(ImportError):
    """Provides information about a failed :func:`import_string` attempt."""

    #: String in dotted notation that failed to be imported.
    import_name = None
    #: Wrapped exception.
    exception = None

    def __init__(self, import_name, exception):
        self.import_name = import_name
        self.exception = exception

        msg = (
            "import_string() failed for %r. Possible reasons are:\n\n"
            "- missing __init__.py in a package;\n"
            "- package or module path not included in sys.path;\n"
            "- duplicated package or module name taking precedence in "
            "sys.path;\n"
            "- missing module, class, function or variable;\n\n"
            "Debugged import:\n\n%s\n\n"
            "Original exception:\n\n%s: %s"
        )

        name = ""
        tracked = []
        for part in import_name.replace(":", ".").split("."):
            name += (name and ".") + part
            imported = import_string(name, silent=True)
            if imported:
                tracked.append((name, getattr(imported, "__file__", None)))
            else:
                track = ["- %r found in %r." % (n, i) for n, i in tracked]
                track.append("- %r not found." % name)
                msg = msg % (
                    import_name,
                    "\n".join(track),
                    exception.__class__.__name__,
                    str(exception),
                )
                break

        ImportError.__init__(self, msg)

    def __repr__(self):
        return "<%s(%r, %r)>" % (
            self.__class__.__name__,
            self.import_name,
            self.exception,
        )

# DEPRECATED
from .datastructures import CombinedMultiDict as _CombinedMultiDict
from .datastructures import EnvironHeaders as _EnvironHeaders
from .datastructures import Headers as _Headers
from .datastructures import MultiDict as _MultiDict
from .http import dump_cookie as _dump_cookie
from .http import parse_cookie as _parse_cookie


class MultiDict(_MultiDict):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.utils.MultiDict' has moved to 'werkzeug"
            ".datastructures.MultiDict' as of version 0.5. This old"
            " import will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(MultiDict, self).__init__(*args, **kwargs)


class CombinedMultiDict(_CombinedMultiDict):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.utils.CombinedMultiDict' has moved to 'werkzeug"
            ".datastructures.CombinedMultiDict' as of version 0.5. This"
            " old import will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(CombinedMultiDict, self).__init__(*args, **kwargs)


class Headers(_Headers):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.utils.Headers' has moved to 'werkzeug"
            ".datastructures.Headers' as of version 0.5. This old"
            " import will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(Headers, self).__init__(*args, **kwargs)


class EnvironHeaders(_EnvironHeaders):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "'werkzeug.utils.EnvironHeaders' has moved to 'werkzeug"
            ".datastructures.EnvironHeaders' as of version 0.5. This"
            " old import will be removed in version 1.0.",
            DeprecationWarning,
            stacklevel=2,
        )
        super(EnvironHeaders, self).__init__(*args, **kwargs)


def parse_cookie(*args, **kwargs):
    warnings.warn(
        "'werkzeug.utils.parse_cookie' has moved to 'werkzeug.http"
        ".parse_cookie' as of version 0.5. This old import will be"
        " removed in version 1.0.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _parse_cookie(*args, **kwargs)


def dump_cookie(*args, **kwargs):
    warnings.warn(
        "'werkzeug.utils.dump_cookie' has moved to 'werkzeug.http"
        ".dump_cookie' as of version 0.5. This old import will be"
        " removed in version 1.0.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _dump_cookie(*args, **kwargs)
36
python/werkzeug/wrappers/__init__.py
Normal file
@@ -0,0 +1,36 @@
"""
    werkzeug.wrappers
    ~~~~~~~~~~~~~~~~~

    The wrappers are simple request and response objects which you can
    subclass to do whatever you want them to do. The request object contains
    the information transmitted by the client (web browser) and the response
    object contains all the information sent back to the browser.

    An important detail is that the request object is created with the WSGI
    environ and will act as a high-level proxy whereas the response object is
    an actual WSGI application.

    Like everything else in Werkzeug these objects will work correctly with
    unicode data. Incoming form data parsed by the request object will be
    decoded into a unicode object if possible and if it makes sense.

    :copyright: 2007 Pallets
    :license: BSD-3-Clause
"""
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .auth import WWWAuthenticateMixin
from .base_request import BaseRequest
from .base_response import BaseResponse
from .common_descriptors import CommonRequestDescriptorsMixin
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagRequestMixin
from .etag import ETagResponseMixin
from .request import PlainRequest
from .request import Request
from .request import StreamOnlyMixin
from .response import Response
from .response import ResponseStream
from .response import ResponseStreamMixin
from .user_agent import UserAgentMixin
50
python/werkzeug/wrappers/accept.py
Normal file
@@ -0,0 +1,50 @@
from ..datastructures import CharsetAccept
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..http import parse_accept_header
from ..utils import cached_property


class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute
    to get all the HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).
    """

    @cached_property
    def accept_mimetypes(self):
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.environ.get("HTTP_ACCEPT"), MIMEAccept)

    @cached_property
    def accept_charsets(self):
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(
            self.environ.get("HTTP_ACCEPT_CHARSET"), CharsetAccept
        )

    @cached_property
    def accept_encodings(self):
        """List of encodings this client accepts. Encodings in HTTP terms
        are compression encodings such as gzip. For charsets have a look at
        :attr:`accept_charsets`.
        """
        return parse_accept_header(self.environ.get("HTTP_ACCEPT_ENCODING"))

    @cached_property
    def accept_languages(self):
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.

        .. versionchanged:: 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(
            self.environ.get("HTTP_ACCEPT_LANGUAGE"), LanguageAccept
        )
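
# Editor's sketch: on a request class that includes this mixin (such as
# werkzeug.wrappers.Request), the properties parse the Accept-* headers.
#
#     from werkzeug.test import create_environ
#     env = create_environ(headers={"Accept": "text/html,application/json;q=0.5"})
#     # a request wrapping `env` would report:
#     # request.accept_mimetypes.best == "text/html"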
33
python/werkzeug/wrappers/auth.py
Normal file
@@ -0,0 +1,33 @@
from ..http import parse_authorization_header
from ..http import parse_www_authenticate_header
from ..utils import cached_property


class AuthorizationMixin(object):
    """Adds an :attr:`authorization` property that represents the parsed
    value of the `Authorization` header as
    :class:`~werkzeug.datastructures.Authorization` object.
    """

    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        header = self.environ.get("HTTP_AUTHORIZATION")
        return parse_authorization_header(header)


class WWWAuthenticateMixin(object):
    """Adds a :attr:`www_authenticate` property to a response object."""

    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""

        def on_update(www_auth):
            if not www_auth and "www-authenticate" in self.headers:
                del self.headers["www-authenticate"]
            elif www_auth:
                self.headers["WWW-Authenticate"] = www_auth.to_header()

        header = self.headers.get("www-authenticate")
        return parse_www_authenticate_header(header, on_update)
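
# Editor's sketch: with a Basic credential in the environ, the mixin
# exposes the parsed username and password.
#
#     # environ["HTTP_AUTHORIZATION"] = "Basic dXNlcjpwYXNz"  # "user:pass"
#     # request.authorization.username  -> "user"
#     # request.authorization.password  -> "pass"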
693
python/werkzeug/wrappers/base_request.py
Normal file
@@ -0,0 +1,693 @@
import warnings
from functools import update_wrapper
from io import BytesIO

from .._compat import to_native
from .._compat import to_unicode
from .._compat import wsgi_decoding_dance
from .._compat import wsgi_get_bytes
from ..datastructures import CombinedMultiDict
from ..datastructures import EnvironHeaders
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import ImmutableTypeConversionDict
from ..datastructures import iter_multi_items
from ..datastructures import MultiDict
from ..formparser import default_stream_factory
from ..formparser import FormDataParser
from ..http import parse_cookie
from ..http import parse_options_header
from ..urls import url_decode
from ..utils import cached_property
from ..utils import environ_property
from ..wsgi import get_content_length
from ..wsgi import get_current_url
from ..wsgi import get_host
from ..wsgi import get_input_stream

class BaseRequest(object):
    """Very basic request object. This does not implement advanced stuff like
    entity tag parsing or cache controls. The request object is created with
    the WSGI environment as first argument and will add itself to the WSGI
    environment as ``'werkzeug.request'`` unless it's created with
    `populate_request` set to False.

    There are a couple of mixins available that add additional functionality
    to the request object, there is also a class called `Request` which
    subclasses `BaseRequest` and all the important mixins.

    It's a good idea to create a custom subclass of the :class:`BaseRequest`
    and add missing functionality either via mixins or direct implementation.
    Here is an example of such a subclass::

        from werkzeug.wrappers import BaseRequest, ETagRequestMixin

        class Request(BaseRequest, ETagRequestMixin):
            pass

    Request objects are **read only**. As of 0.5 modifications are not
    allowed in any place. Unlike the lower level parsing functions the
    request object will use immutable objects everywhere possible.

    Per default the request object will assume all the text data is `utf-8`
    encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
    details about customizing the behavior.

    Per default the request object will be added to the WSGI
    environment as `werkzeug.request` to support the debugging system.
    If you don't want that, set `populate_request` to `False`.

    If `shallow` is `True` the environment is initialized as shallow
    object around the environ. Every operation that would modify the
    environ in any way (such as consuming form data) raises an exception
    unless the `shallow` attribute is explicitly set to `False`. This
    is useful for middlewares where you don't want to consume the form
    data by accident. A shallow request is not populated to the WSGI
    environment.

    .. versionchanged:: 0.5
       read-only mode was enforced by using immutable classes for all
       data.
    """

    #: the charset for the request, defaults to utf-8
    charset = "utf-8"

    #: the error handling procedure for errors, defaults to 'replace'
    encoding_errors = "replace"

    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_content_length = None

    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size = None

    #: the class to use for `args` and `form`. The default is an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. Alternatively it makes sense to use an
    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
    #: which is the fastest but only remembers the last key. It is also
    #: possible to use mutable structures, but this is not recommended.
    #:
    #: .. versionadded:: 0.6
    parameter_storage_class = ImmutableMultiDict

    #: the type to be used for list values from the incoming WSGI environment.
    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
    #: (for example for :attr:`access_list`).
    #:
    #: .. versionadded:: 0.6
    list_storage_class = ImmutableList

    #: the type to be used for dict values from the incoming WSGI environment.
    #: By default an
    #: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
    #: (for example for :attr:`cookies`).
    #:
    #: .. versionadded:: 0.6
    dict_storage_class = ImmutableTypeConversionDict

    #: The form data parser that should be used. Can be replaced to customize
    #: the form data parsing.
    form_data_parser_class = FormDataParser

    #: Optionally a list of hosts that is trusted by this request. By default
    #: all hosts are trusted, which means that whatever host the client sends
    #: will be accepted.
    #:
    #: Because `Host` and `X-Forwarded-Host` headers can be set to any value by
    #: a malicious client, it is recommended to either set this property or
    #: implement similar validation in the proxy (if the application is being
    #: run behind one).
    #:
    #: .. versionadded:: 0.9
    trusted_hosts = None

    #: Indicates whether the data descriptor should be allowed to read and
    #: buffer up the input stream. By default it's enabled.
    #:
    #: .. versionadded:: 0.9
    disable_data_descriptor = False

    def __init__(self, environ, populate_request=True, shallow=False):
        self.environ = environ
        if populate_request and not shallow:
            self.environ["werkzeug.request"] = self
        self.shallow = shallow

    def __repr__(self):
        # make sure the __repr__ even works if the request was created
        # from an invalid WSGI environment. If we display the request
        # in a debug session we don't want the repr to blow up.
        args = []
        try:
            args.append("'%s'" % to_native(self.url, self.url_charset))
            args.append("[%s]" % self.method)
        except Exception:
            args.append("(invalid WSGI environ)")

        return "<%s %s>" % (self.__class__.__name__, " ".join(args))

    @property
    def url_charset(self):
        """The charset that is assumed for URLs. Defaults to the value
        of :attr:`charset`.

        .. versionadded:: 0.6
        """
        return self.charset

    @classmethod
    def from_values(cls, *args, **kwargs):
        """Create a new request object based on the values provided. If
        environ is given missing values are filled from there. This method is
        useful for small scripts when you need to simulate a request from a URL.
        Do not use this method for unit testing, there is a full featured client
        object (:class:`Client`) that allows you to create multipart requests,
        support for cookies etc.

        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.

        .. versionchanged:: 0.5
           This method now accepts the same arguments as
           :class:`~werkzeug.test.EnvironBuilder`. Because of this the
           `environ` parameter is now called `environ_overrides`.

        :return: request object
        """
        from ..test import EnvironBuilder

        charset = kwargs.pop("charset", cls.charset)
        kwargs["charset"] = charset
        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            builder.close()
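
    # Editor's sketch: building a request without a server; the query string
    # can be given as part of the path.
    #
    #     req = BaseRequest.from_values(
    #         "/search?q=werkzeug", method="GET", headers={"X-Debug": "1"}
    #     )
    #     (req.path, req.args["q"])  # -> ("/search", "werkzeug")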

    @classmethod
    def application(cls, f):
        """Decorate a function as responder that accepts the request as first
        argument. This works like the :func:`responder` decorator but the
        function is passed the request object as first argument and the
        request object will be closed automatically::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        As of Werkzeug 0.14 HTTP exceptions are automatically caught and
        converted to responses instead of failing.

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both methods and standalone WSGI functions.
        from ..exceptions import HTTPException

        def application(*args):
            request = cls(args[-2])
            with request:
                try:
                    resp = f(*args[:-2] + (request,))
                except HTTPException as e:
                    resp = e.get_response(args[-2])
                return resp(*args[-2:])

        return update_wrapper(application, f)

    def _get_file_stream(
        self, total_content_length, content_type, filename=None, content_length=None
    ):
        """Called to get a stream for the file upload.

        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.

        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.

        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because web browsers do not
                               provide this value.
        """
        return default_stream_factory(
            total_content_length=total_content_length,
            filename=filename,
            content_type=content_type,
            content_length=content_length,
        )

    @property
    def want_form_data_parsed(self):
        """Returns True if the request method carries content. As of
        Werkzeug 0.9 this will be the case if a content type is transmitted.

        .. versionadded:: 0.8
        """
        return bool(self.environ.get("CONTENT_TYPE"))

    def make_form_data_parser(self):
        """Creates the form data parser. Instantiates the
        :attr:`form_data_parser_class` with some parameters.

        .. versionadded:: 0.8
        """
        return self.form_data_parser_class(
            self._get_file_stream,
            self.charset,
            self.encoding_errors,
            self.max_form_memory_size,
            self.max_content_length,
            self.parameter_storage_class,
        )

    def _load_form_data(self):
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if "form" in self.__dict__:
            return

        _assert_not_shallow(self)

        if self.want_form_data_parsed:
            content_type = self.environ.get("CONTENT_TYPE", "")
            content_length = get_content_length(self.environ)
            mimetype, options = parse_options_header(content_type)
            parser = self.make_form_data_parser()
            data = parser.parse(
                self._get_stream_for_parsing(), mimetype, content_length, options
            )
        else:
            data = (
                self.stream,
                self.parameter_storage_class(),
                self.parameter_storage_class(),
            )

        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d["stream"], d["form"], d["files"] = data

    def _get_stream_for_parsing(self):
        """This is the same as accessing :attr:`stream` with the difference
        that if it finds cached data from calling :meth:`get_data` first it
        will create a new stream out of the cached data.

        .. versionadded:: 0.9.3
        """
        cached_data = getattr(self, "_cached_data", None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream

    def close(self):
        """Closes associated resources of this request object. This
        closes all file handles explicitly. You can also use the request
        object in a with statement which will automatically close it.

        .. versionadded:: 0.9
        """
        files = self.__dict__.get("files")
        for _key, value in iter_multi_items(files or ()):
            value.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    @cached_property
    def stream(self):
        """
        If the incoming form data was not encoded with a known mimetype
        the data is stored unmodified in this stream for consumption. Most
        of the time it is a better idea to use :attr:`data` which will give
        you that data as a string. The stream only returns the data once.

        Unlike :attr:`input_stream` this stream is properly guarded so that
        you can't accidentally read past the length of the input. Werkzeug
        will internally always refer to this stream to read data which makes
        it possible to wrap this object with a stream that does filtering.

        .. versionchanged:: 0.9
           This stream is now always available but might be consumed by the
           form parser later on. Previously the stream was only set if no
           parsing happened.
        """
        _assert_not_shallow(self)
        return get_input_stream(self.environ)

    input_stream = environ_property(
        "wsgi.input",
        """The WSGI input stream.

        In general it's a bad idea to use this one because you can
        easily read past the boundary. Use the :attr:`stream`
        instead.""",
    )

    @cached_property
    def args(self):
        """The parsed URL parameters (the part in the URL after the question
        mark).

        By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        return url_decode(
            wsgi_get_bytes(self.environ.get("QUERY_STRING", "")),
            self.url_charset,
            errors=self.encoding_errors,
            cls=self.parameter_storage_class,
        )

    @cached_property
    def data(self):
        """
        Contains the incoming request data as string in case it came with
        a mimetype Werkzeug does not handle.
        """

        if self.disable_data_descriptor:
            raise AttributeError("data descriptor is disabled")
        # XXX: this should eventually be deprecated.

        # We trigger form data parsing first which means that the descriptor
        # will not cache the data that would otherwise be .form or .files
        # data. This restores the behavior that was there in Werkzeug
        # before 0.9. New code should use :meth:`get_data` explicitly as
        # this will make behavior explicit.
        return self.get_data(parse_form_data=True)

    def get_data(self, cache=True, as_text=False, parse_form_data=False):
        """This reads the buffered incoming data from the client into one
        bytestring. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.

        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.

        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke the form data parsing function,
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.

        If `as_text` is set to `True` the return value will be a decoded
        unicode string.

        .. versionadded:: 0.9
        """
        rv = getattr(self, "_cached_data", None)
        if rv is None:
            if parse_form_data:
                self._load_form_data()
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            rv = rv.decode(self.charset, self.encoding_errors)
        return rv
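
    # Editor's sketch: reading the raw body. Checking the length first is
    # the caller's job; `content_length` comes from the descriptors mixin
    # on the full Request class, not from BaseRequest itself.
    #
    #     if (request.content_length or 0) < 1024 * 1024:
    #         raw = request.get_data(cache=True)      # bytes
    #         text = request.get_data(as_text=True)   # decoded via request.charset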
@cached_property
|
||||
def form(self):
|
||||
"""The form parameters. By default an
|
||||
:class:`~werkzeug.datastructures.ImmutableMultiDict`
|
||||
is returned from this function. This can be changed by setting
|
||||
:attr:`parameter_storage_class` to a different type. This might
|
||||
be necessary if the order of the form data is important.
|
||||
|
||||
Please keep in mind that file uploads will not end up here, but instead
|
||||
in the :attr:`files` attribute.
|
||||
|
||||
.. versionchanged:: 0.9
|
||||
|
||||
Previous to Werkzeug 0.9 this would only contain form data for POST
|
||||
and PUT requests.
|
||||
"""
|
||||
self._load_form_data()
|
||||
return self.form
|
||||
|
||||
@cached_property
|
||||
def values(self):
|
||||
"""A :class:`werkzeug.datastructures.CombinedMultiDict` that combines
|
||||
:attr:`args` and :attr:`form`."""
|
||||
args = []
|
||||
for d in self.args, self.form:
|
||||
if not isinstance(d, MultiDict):
|
||||
d = MultiDict(d)
|
||||
args.append(d)
|
||||
return CombinedMultiDict(args)
|
||||
|
||||
@cached_property
|
||||
def files(self):
|
||||
""":class:`~werkzeug.datastructures.MultiDict` object containing
|
||||
all uploaded files. Each key in :attr:`files` is the name from the
|
||||
``<input type="file" name="">``. Each value in :attr:`files` is a
|
||||
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
|
||||
|
||||
It basically behaves like a standard file object you know from Python,
|
||||
with the difference that it also has a
|
||||
:meth:`~werkzeug.datastructures.FileStorage.save` function that can
|
||||
store the file on the filesystem.
|
||||
|
||||
Note that :attr:`files` will only contain data if the request method was
|
||||
POST, PUT or PATCH and the ``<form>`` that posted to the request had
|
||||
``enctype="multipart/form-data"``. It will be empty otherwise.
|
||||
|
||||
See the :class:`~werkzeug.datastructures.MultiDict` /
|
||||
:class:`~werkzeug.datastructures.FileStorage` documentation for
|
||||
more details about the used data structure.
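
        For example, a sketch for a hypothetical upload field named
        ``"avatar"`` (:func:`~werkzeug.utils.secure_filename` sanitizes
        the client-supplied name)::

            f = request.files.get("avatar")
            if f is not None:
                f.save("/tmp/" + secure_filename(f.filename))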
        """
        self._load_form_data()
        return self.files

    @cached_property
    def cookies(self):
        """A :class:`dict` with the contents of all cookies transmitted with
        the request."""
        return parse_cookie(
            self.environ,
            self.charset,
            self.encoding_errors,
            cls=self.dict_storage_class,
        )

    @cached_property
    def headers(self):
        """The headers from the WSGI environ as immutable
        :class:`~werkzeug.datastructures.EnvironHeaders`.
        """
        return EnvironHeaders(self.environ)

    @cached_property
    def path(self):
        """Requested path as unicode. This works a bit like the regular path
        info in the WSGI environment but will always include a leading slash,
        even if the URL root is accessed.
        """
        raw_path = wsgi_decoding_dance(
            self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
        )
        return "/" + raw_path.lstrip("/")

    @cached_property
    def full_path(self):
        """Requested path as unicode, including the query string."""
        return self.path + u"?" + to_unicode(self.query_string, self.url_charset)

    @cached_property
    def script_root(self):
        """The root path of the script without the trailing slash."""
        raw_path = wsgi_decoding_dance(
            self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
        )
        return raw_path.rstrip("/")

    @cached_property
    def url(self):
        """The reconstructed current URL as IRI.
        See also: :attr:`trusted_hosts`.
        """
        return get_current_url(self.environ, trusted_hosts=self.trusted_hosts)

    @cached_property
    def base_url(self):
        """Like :attr:`url` but without the query string.
        See also: :attr:`trusted_hosts`.
        """
        return get_current_url(
            self.environ, strip_querystring=True, trusted_hosts=self.trusted_hosts
        )

    @cached_property
    def url_root(self):
        """The full URL root (with hostname), this is the application
        root as IRI.
        See also: :attr:`trusted_hosts`.
        """
        return get_current_url(self.environ, True, trusted_hosts=self.trusted_hosts)

    @cached_property
    def host_url(self):
        """Just the host with scheme as IRI.
        See also: :attr:`trusted_hosts`.
        """
        return get_current_url(
            self.environ, host_only=True, trusted_hosts=self.trusted_hosts
        )

    @cached_property
    def host(self):
        """Just the host including the port if available.
        See also: :attr:`trusted_hosts`.
        """
        return get_host(self.environ, trusted_hosts=self.trusted_hosts)

    query_string = environ_property(
        "QUERY_STRING",
        "",
        read_only=True,
        load_func=wsgi_get_bytes,
        doc="The URL parameters as raw bytestring.",
    )
    method = environ_property(
        "REQUEST_METHOD",
        "GET",
        read_only=True,
        load_func=lambda x: x.upper(),
        doc="The request method. (For example ``'GET'`` or ``'POST'``).",
    )

    @cached_property
    def access_route(self):
        """If a forwarded header exists this is a list of all IP addresses
        from the client IP to the last proxy server.
        """
        if "HTTP_X_FORWARDED_FOR" in self.environ:
            addr = self.environ["HTTP_X_FORWARDED_FOR"].split(",")
            return self.list_storage_class([x.strip() for x in addr])
        elif "REMOTE_ADDR" in self.environ:
            return self.list_storage_class([self.environ["REMOTE_ADDR"]])
        return self.list_storage_class()

    @property
    def remote_addr(self):
        """The remote address of the client."""
        return self.environ.get("REMOTE_ADDR")

    remote_user = environ_property(
        "REMOTE_USER",
        doc="""If the server supports user authentication, and the
        script is protected, this attribute contains the username the
        user has authenticated as.""",
    )

    scheme = environ_property(
        "wsgi.url_scheme",
        doc="""
        URL scheme (http or https).

        .. versionadded:: 0.7""",
    )

    @property
    def is_xhr(self):
        """True if the request was triggered via a JavaScript XMLHttpRequest.
        This only works with libraries that support the ``X-Requested-With``
        header and set it to "XMLHttpRequest". Libraries that do so include
        Prototype, jQuery and MochiKit, among others.

        .. deprecated:: 0.13
            ``X-Requested-With`` is not standard and is unreliable. You
            may be able to use :attr:`AcceptMixin.accept_mimetypes`
            instead.
        """
        warnings.warn(
            "'Request.is_xhr' is deprecated as of version 0.13 and will"
            " be removed in version 1.0. The 'X-Requested-With' header"
            " is not standard and is unreliable. You may be able to use"
            " 'accept_mimetypes' instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return (
            self.environ.get("HTTP_X_REQUESTED_WITH", "").lower() == "xmlhttprequest"
        )

    is_secure = property(
        lambda self: self.environ["wsgi.url_scheme"] == "https",
        doc="`True` if the request is secure.",
    )
    is_multithread = environ_property(
        "wsgi.multithread",
        doc="""boolean that is `True` if the application is served by a
        multithreaded WSGI server.""",
    )
    is_multiprocess = environ_property(
        "wsgi.multiprocess",
        doc="""boolean that is `True` if the application is served by a
        WSGI server that spawns multiple processes.""",
    )
    is_run_once = environ_property(
        "wsgi.run_once",
        doc="""boolean that is `True` if the application will be
        executed only once in a process lifetime. This is the case for
        CGI for example, but it's not guaranteed that the execution only
        happens one time.""",
    )


def _assert_not_shallow(request):
    if request.shallow:
        raise RuntimeError(
            "A shallow request tried to consume form data. If you really"
            " want to do that, set `shallow` to False."
        )
702
python/werkzeug/wrappers/base_response.py
Normal file
@@ -0,0 +1,702 @@
import warnings

from .._compat import integer_types
from .._compat import string_types
from .._compat import text_type
from .._compat import to_bytes
from .._compat import to_native
from ..datastructures import Headers
from ..http import dump_cookie
from ..http import HTTP_STATUS_CODES
from ..http import remove_entity_headers
from ..urls import iri_to_uri
from ..urls import url_join
from ..utils import get_content_type
from ..wsgi import ClosingIterator
from ..wsgi import get_current_url


def _run_wsgi_app(*args):
    """This function replaces itself to ensure that the test module is not
    imported unless required. DO NOT USE!
    """
    global _run_wsgi_app
    from ..test import run_wsgi_app as _run_wsgi_app

    return _run_wsgi_app(*args)


def _warn_if_string(iterable):
    """Helper for the response objects to check if the iterable returned
    to the WSGI server is not a string.
    """
    if isinstance(iterable, string_types):
        warnings.warn(
            "Response iterable was set to a string. This will appear to"
            " work but means that the server will send the data to the"
            " client one character at a time. This is almost never"
            " intended behavior, use 'response.data' to assign strings"
            " to the response object.",
            stacklevel=2,
        )


def _iter_encoded(iterable, charset):
    for item in iterable:
        if isinstance(item, text_type):
            yield item.encode(charset)
        else:
            yield item


def _clean_accept_ranges(accept_ranges):
    if accept_ranges is True:
        return "bytes"
    elif accept_ranges is False:
        return "none"
    elif isinstance(accept_ranges, text_type):
        return to_native(accept_ranges)
    raise ValueError("Invalid accept_ranges value")


class BaseResponse(object):
    """Base response class. The most important fact about a response object
    is that it's a regular WSGI application. It's initialized with a couple
    of response parameters (headers, body, status code etc.) and will start a
    valid WSGI response when called with the environ and start response
    callable.

    Because it's a WSGI application itself, processing usually ends before the
    actual response is sent to the server. This helps debugging systems
    because they can catch all the exceptions before responses are started.

    Here is a small example WSGI application that takes advantage of the
    response objects::

        from werkzeug.wrappers import BaseResponse as Response

        def index():
            return Response('Index page')

        def application(environ, start_response):
            path = environ.get('PATH_INFO') or '/'
            if path == '/':
                response = index()
            else:
                response = Response('Not Found', status=404)
            return response(environ, start_response)

    Like :class:`BaseRequest`, this object lacks a lot of functionality,
    which is instead implemented in mixins. This gives you better control
    over the actual API of your response objects, so you can create
    subclasses and add custom functionality. A full featured response
    object is available as :class:`Response` which implements a couple of
    useful mixins.

    To enforce a new type on already existing responses you can use the
    :meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post process them with a
    known interface.

    By default the response object will assume all the text data is `utf-8`
    encoded. Please refer to :doc:`the unicode chapter </unicode>` for more
    details about customizing the behavior.

    Response can be any kind of iterable or string. If it's a string it's
    considered being an iterable with one item which is the string passed.
    Headers can be a list of tuples or a
    :class:`~werkzeug.datastructures.Headers` object.

    Special note for `mimetype` and `content_type`: For most mime types
    `mimetype` and `content_type` work the same, the difference affects
    only 'text' mimetypes. If the mimetype passed with `mimetype` is a
    mimetype starting with `text/`, the charset parameter of the response
    object is appended to it. In contrast the `content_type` parameter is
    always added as a header unmodified.
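
    For example (a minimal sketch of the difference)::

        Response('Hello', mimetype='text/plain')
        # Content-Type: text/plain; charset=utf-8

        Response('Hello', content_type='text/plain')
        # Content-Type: text/plain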

    .. versionchanged:: 0.5
       the `direct_passthrough` parameter was added.

    :param response: a string or response iterable.
    :param status: a string with a status or an integer with the status code.
    :param headers: a list of headers or a
                    :class:`~werkzeug.datastructures.Headers` object.
    :param mimetype: the mimetype for the response. See notice above.
    :param content_type: the content type for the response. See notice above.
    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
                               called before iteration which makes it
                               possible to pass special iterators through
                               unchanged (see :func:`wrap_file` for more
                               details.)
    """

    #: the charset of the response.
    charset = "utf-8"

    #: the default status if none is provided.
    default_status = 200

    #: the default mimetype if none is provided.
    default_mimetype = "text/plain"

    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True

    #: Should this response object correct the location header to be RFC
    #: conformant? This is true by default.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = True

    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True

    #: Warn if a cookie header exceeds this size. The default, 4093, should be
    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
    #: this size will still be sent, but it may be ignored or handled
    #: incorrectly by some browsers. Set to 0 to disable this check.
    #:
    #: .. versionadded:: 0.13
    #:
    #: .. _`cookie`: http://browsercookielimits.squawky.net/
    max_cookie_size = 4093

    def __init__(
        self,
        response=None,
        status=None,
        headers=None,
        mimetype=None,
        content_type=None,
        direct_passthrough=False,
    ):
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)

        if content_type is None:
            if mimetype is None and "content-type" not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers["Content-Type"] = content_type
        if status is None:
            status = self.default_status
        if isinstance(status, integer_types):
            self.status_code = status
        else:
            self.status = status

        self.direct_passthrough = direct_passthrough
        self._on_close = []

        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, (text_type, bytes, bytearray)):
            self.set_data(response)
        else:
            self.response = response

    def call_on_close(self, func):
        """Adds a function to the internal list of functions that should
        be called as part of closing down the response. Since 0.7 this
        function also returns the function that was passed so that this
        can be used as a decorator.
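
        For example (an illustrative sketch; ``cleanup`` and ``temp_file``
        are hypothetical)::

            @response.call_on_close
            def cleanup():
                temp_file.close()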

        .. versionadded:: 0.6
        """
        self._on_close.append(func)
        return func

    def __repr__(self):
        if self.is_sequence:
            body_info = "%d bytes" % sum(map(len, self.iter_encoded()))
        else:
            body_info = "streamed" if self.is_streamed else "likely-streamed"
        return "<%s %s [%s]>" % (self.__class__.__name__, body_info, self.status)

    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.

        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::

            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)

            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)

        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.

        Keep in mind that this will modify response objects in place if
        possible!

        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError(
                    "cannot convert WSGI application into response"
                    " objects without an environ"
                )
            response = BaseResponse(*_run_wsgi_app(response, environ))
        response.__class__ = cls
        return response

    @classmethod
    def from_app(cls, app, environ, buffered=False):
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.
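
        For example (a sketch that assumes ``app`` is some WSGI
        application; ``create_environ`` comes from :mod:`werkzeug.test`)::

            from werkzeug.test import create_environ

            resp = BaseResponse.from_app(app, create_environ(path="/"))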

        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        return cls(*_run_wsgi_app(app, environ, buffered))

    def _get_status_code(self):
        return self._status_code

    def _set_status_code(self, code):
        self._status_code = code
        try:
            self._status = "%d %s" % (code, HTTP_STATUS_CODES[code].upper())
        except KeyError:
            self._status = "%d UNKNOWN" % code

    status_code = property(
        _get_status_code, _set_status_code, doc="The HTTP status code as a number"
    )
    del _get_status_code, _set_status_code

    def _get_status(self):
        return self._status

    def _set_status(self, value):
        try:
            self._status = to_native(value)
        except AttributeError:
            raise TypeError("Invalid status argument")

        try:
            self._status_code = int(self._status.split(None, 1)[0])
        except ValueError:
            self._status_code = 0
            self._status = "0 %s" % self._status
        except IndexError:
            raise ValueError("Empty status argument")

    status = property(_get_status, _set_status, doc="The HTTP status code")
    del _get_status, _set_status

    def get_data(self, as_text=False):
        """The string representation of the response body. Whenever you call
        this property the response iterable is encoded and flattened. This
        can lead to unwanted behavior if you stream big data.

        This behavior can be disabled by setting
        :attr:`implicit_sequence_conversion` to `False`.

        If `as_text` is set to `True` the return value will be a decoded
        unicode string.
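
        For example (a minimal sketch)::

            resp = BaseResponse(u'Hello')
            resp.get_data()               # b'Hello'
            resp.get_data(as_text=True)   # u'Hello'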

        .. versionadded:: 0.9
        """
        self._ensure_sequence()
        rv = b"".join(self.iter_encoded())
        if as_text:
            rv = rv.decode(self.charset)
        return rv

    def set_data(self, value):
        """Sets a new string as response. The value set must either be a
        unicode or bytestring. If a unicode string is set it's encoded
        automatically to the charset of the response (utf-8 by default).

        .. versionadded:: 0.9
        """
        # if a unicode string is set, it's encoded directly so that we
        # can set the content length
        if isinstance(value, text_type):
            value = value.encode(self.charset)
        else:
            value = bytes(value)
        self.response = [value]
        if self.automatically_set_content_length:
            self.headers["Content-Length"] = str(len(value))

    data = property(
        get_data,
        set_data,
        doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
    )

    def calculate_content_length(self):
        """Returns the content length if available or `None` otherwise."""
        try:
            self._ensure_sequence()
        except RuntimeError:
            return None
        return sum(len(x) for x in self.iter_encoded())

    def _ensure_sequence(self, mutable=False):
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.

        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)
            return
        if self.direct_passthrough:
            raise RuntimeError(
                "Attempted implicit sequence conversion but the"
                " response object is in direct passthrough mode."
            )
        if not self.implicit_sequence_conversion:
            raise RuntimeError(
                "The response object required the iterable to be a"
                " sequence, but the implicit conversion was disabled."
                " Call make_sequence() yourself."
            )
        self.make_sequence()

    def make_sequence(self):
        """Converts the response iterator into a list. By default this happens
        automatically if required. If `implicit_sequence_conversion` is
        disabled, this method is not automatically called and some properties
        might raise exceptions. This also encodes all the items.

        .. versionadded:: 0.6
        """
        if not self.is_sequence:
            # if we consume an iterable we have to ensure that the close
            # method of the iterable is called if available when we tear
            # down the response
            close = getattr(self.response, "close", None)
            self.response = list(self.iter_encoded())
            if close is not None:
                self.call_on_close(close)

    def iter_encoded(self):
        """Iterate over the response encoded with the encoding of the response.
        If the response object is invoked as a WSGI application the return
        value of this method is used as the application iterator unless
        :attr:`direct_passthrough` was activated.
        """
        if __debug__:
            _warn_if_string(self.response)
        # Encode in a separate function so that self.response is fetched
        # early. This allows us to wrap the response with the return
        # value from get_app_iter or iter_encoded.
        return _iter_encoded(self.response, self.charset)

    def set_cookie(
        self,
        key,
        value="",
        max_age=None,
        expires=None,
        path="/",
        domain=None,
        secure=False,
        httponly=False,
        samesite=None,
    ):
        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
        object in the Python standard library but it accepts unicode data, too.

        A warning is raised if the size of the cookie header exceeds
        :attr:`max_cookie_size`, but the header will still be set.

        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param secure: If `True`, the cookie will only be available via HTTPS
        :param httponly: disallow JavaScript to access the cookie. This is an
                         extension to the cookie standard and probably not
                         supported by all browsers.
        :param samesite: Limits the scope of the cookie such that it will only
                         be attached to requests if those requests are
                         "same-site".
        """
        self.headers.add(
            "Set-Cookie",
            dump_cookie(
                key,
                value=value,
                max_age=max_age,
                expires=expires,
                path=path,
                domain=domain,
                secure=secure,
                httponly=httponly,
                charset=self.charset,
                max_size=self.max_cookie_size,
                samesite=samesite,
            ),
        )

    def delete_cookie(self, key, path="/", domain=None):
        """Delete a cookie. Fails silently if key doesn't exist.

        :param key: the key (name) of the cookie to be deleted.
        :param path: if the cookie that should be deleted was limited to a
                     path, the path has to be defined here.
        :param domain: if the cookie that should be deleted was limited to a
                       domain, that domain has to be defined here.
        """
        self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)

    @property
    def is_streamed(self):
        """If the response is streamed (the response is not an iterable with
        length information) this property is `True`. In this case streamed
        means that there is no information about the number of iterations.
        This is usually `True` if a generator is passed to the response object.

        This is useful for checking before applying some sort of post
        filtering that should not take place for streamed responses.
        """
        try:
            len(self.response)
        except (TypeError, AttributeError):
            return True
        return False

    @property
    def is_sequence(self):
        """If the iterator is buffered, this property will be `True`. A
        response object will consider an iterator to be buffered if the
        response attribute is a list or tuple.

        .. versionadded:: 0.6
        """
        return isinstance(self.response, (tuple, list))

    def close(self):
        """Close the wrapped response if possible. You can also use the object
        in a with statement which will automatically close it.

        .. versionadded:: 0.9
           Can now be used in a with statement.
        """
        if hasattr(self.response, "close"):
            self.response.close()
        for func in self._on_close:
            func()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        self.close()

    def freeze(self):
        """Call this method if you want to make your response object ready for
        being pickled. This buffers the generator if there is one. It will
        also set the `Content-Length` header to the length of the body.

        .. versionchanged:: 0.6
           The `Content-Length` header is now set.
        """
        # we explicitly set the length to a list of the *encoded* response
        # iterator. Even if the implicit sequence conversion is disabled.
        self.response = list(self.iter_encoded())
        self.headers["Content-Length"] = str(sum(map(len, self.response)))

    def get_wsgi_headers(self, environ):
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.

        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.

        .. versionchanged:: 0.6
           Previously that function was called `fix_headers` and modified
           the response object in place. Also since 0.6, IRIs in location
           and content-location headers are handled properly.

           Also starting with 0.6, Werkzeug will attempt to set the content
           length if it is able to figure it out on its own. This is the
           case if all the strings in the response iterable are already
           encoded and the iterable is buffered.

        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        headers = Headers(self.headers)
        location = None
        content_location = None
        content_length = None
        status = self.status_code

        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used for each response, that gives us a tiny
        # speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == u"location":
                location = value
            elif ikey == u"content-location":
                content_location = value
            elif ikey == u"content-length":
                content_length = value

        # make sure the location header is an absolute URL
        if location is not None:
            old_location = location
            if isinstance(location, text_type):
                # Safe conversion is necessary here as we might redirect
                # to a broken URI scheme (for instance itms-services).
                location = iri_to_uri(location, safe_conversion=True)

            if self.autocorrect_location_header:
                current_url = get_current_url(environ, strip_querystring=True)
                if isinstance(current_url, text_type):
                    current_url = iri_to_uri(current_url)
                location = url_join(current_url, location)
            if location != old_location:
                headers["Location"] = location

        # make sure the content location is a URL
        if content_location is not None and isinstance(content_location, text_type):
            headers["Content-Location"] = iri_to_uri(content_location)

        if 100 <= status < 200 or status == 204:
            # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
            # Content-Length header field in any response with a status
            # code of 1xx (Informational) or 204 (No Content)."
            headers.remove("Content-Length")
        elif status == 304:
            remove_entity_headers(headers)

        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of unicode strings in
        # the response. We however should not do that if we have a 304
        # response.
        if (
            self.automatically_set_content_length
            and self.is_sequence
            and content_length is None
            and status not in (204, 304)
            and not (100 <= status < 200)
        ):
            try:
                content_length = sum(len(to_bytes(x, "ascii")) for x in self.response)
            except UnicodeError:
                # aha, something non-bytestringy in there, too bad, we
                # can't safely figure out the length of the response.
                pass
            else:
                headers["Content-Length"] = str(content_length)

        return headers

    def get_app_iter(self, environ):
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.

        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.

        .. versionadded:: 0.6

        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        if (
            environ["REQUEST_METHOD"] == "HEAD"
            or 100 <= status < 200
            or status in (204, 304)
        ):
            iterable = ()
        elif self.direct_passthrough:
            if __debug__:
                _warn_if_string(self.response)
            return self.response
        else:
            iterable = self.iter_encoded()
        return ClosingIterator(iterable, self.close)

    def get_wsgi_response(self, environ):
        """Returns the final WSGI response as tuple. The first item in
        the tuple is the application iterator, the second the status and
        the third the list of headers. The response returned is created
        specially for the given environment. For example if the request
        method in the WSGI environment is ``'HEAD'`` the response will
        be empty and only the headers and status code will be present.

        .. versionadded:: 0.6

        :param environ: the WSGI environment of the request.
        :return: an ``(app_iter, status, headers)`` tuple.
        """
        headers = self.get_wsgi_headers(environ)
        app_iter = self.get_app_iter(environ)
        return app_iter, self.status, headers.to_wsgi_list()

    def __call__(self, environ, start_response):
        """Process this response as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        :return: an application iterator
        """
        app_iter, status, headers = self.get_wsgi_response(environ)
        start_response(status, headers)
        return app_iter
322
python/werkzeug/wrappers/common_descriptors.py
Normal file
@@ -0,0 +1,322 @@
from datetime import datetime
from datetime import timedelta

from .._compat import string_types
from ..datastructures import CallbackDict
from ..http import dump_age
from ..http import dump_header
from ..http import dump_options_header
from ..http import http_date
from ..http import parse_age
from ..http import parse_date
from ..http import parse_options_header
from ..http import parse_set_header
from ..utils import cached_property
from ..utils import environ_property
from ..utils import get_content_type
from ..utils import header_property
from ..wsgi import get_content_length


class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses. Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.

    .. versionadded:: 0.5
    """

    content_type = environ_property(
        "CONTENT_TYPE",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )

    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return get_content_length(self.environ)

    content_encoding = environ_property(
        "HTTP_CONTENT_ENCODING",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.

        .. versionadded:: 0.9""",
    )
    content_md5 = environ_property(
        "HTTP_CONTENT_MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)

        .. versionadded:: 0.9""",
    )
    referrer = environ_property(
        "HTTP_REFERER",
        doc="""The Referer[sic] request-header field allows the client
        to specify, for the server's benefit, the address (URI) of the
        resource from which the Request-URI was obtained (the
        "referrer", although the header field is misspelled).""",
    )
    date = environ_property(
        "HTTP_DATE",
        None,
        parse_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.""",
    )
    max_forwards = environ_property(
        "HTTP_MAX_FORWARDS",
        None,
        int,
        doc="""The Max-Forwards request-header field provides a
        mechanism with the TRACE and OPTIONS methods to limit the number
        of proxies or gateways that can forward the request to the next
        inbound server.""",
    )

    def _parse_content_type(self):
        if not hasattr(self, "_parsed_content_type"):
            self._parsed_content_type = parse_options_header(
                self.environ.get("CONTENT_TYPE", "")
            )

    @property
    def mimetype(self):
        """Like :attr:`content_type`, but without parameters (e.g. without
        charset, type etc.) and always lowercase. For example if the content
        type is ``text/HTML; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0].lower()

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain. All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get("HTTP_PRAGMA", ""))


class CommonResponseDescriptorsMixin(object):
    """A mixin for :class:`BaseResponse` subclasses. Response objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    """

    @property
    def mimetype(self):
        """The mimetype (content type without charset etc.)"""
        ct = self.headers.get("content-type")
        if ct:
            return ct.split(";")[0].strip()

    @mimetype.setter
    def mimetype(self, value):
        self.headers["Content-Type"] = get_content_type(value, self.charset)

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict. For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.5
        """

        def on_update(d):
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)

    location = header_property(
        "Location",
        doc="""The Location response-header field is used to redirect
        the recipient to a location other than the Request-URI for
        completion of the request or identification of a new
        resource.""",
    )
    age = header_property(
        "Age",
        None,
        parse_age,
        dump_age,
        doc="""The Age response-header field conveys the sender's
        estimate of the amount of time since the response (or its
        revalidation) was generated at the origin server.

        Age values are non-negative decimal integers, representing time
        in seconds.""",
    )
    content_type = header_property(
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )
    content_length = header_property(
        "Content-Length",
        None,
        int,
        str,
        doc="""The Content-Length entity-header field indicates the size
        of the entity-body, in decimal number of OCTETs, sent to the
        recipient or, in the case of the HEAD method, the size of the
        entity-body that would have been sent had the request been a
        GET.""",
    )
    content_location = header_property(
        "Content-Location",
        doc="""The Content-Location entity-header field MAY be used to
        supply the resource location for the entity enclosed in the
        message when that entity is accessible from a location separate
        from the requested resource's URI.""",
    )
    content_encoding = header_property(
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.""",
    )
    content_md5 = header_property(
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)""",
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        http_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.""",
    )
    expires = header_property(
        "Expires",
        None,
        parse_date,
        http_date,
        doc="""The Expires entity-header field gives the date/time after
        which the response is considered stale. A stale cache entry may
        not normally be returned by a cache.""",
    )
    last_modified = header_property(
        "Last-Modified",
        None,
        parse_date,
        http_date,
        doc="""The Last-Modified entity-header field indicates the date
        and time at which the origin server believes the variant was
        last modified.""",
    )

    @property
    def retry_after(self):
        """The Retry-After response-header field can be used with a
        503 (Service Unavailable) response to indicate how long the
        service is expected to be unavailable to the requesting client.

        Time in seconds until expiration or date.
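
        For example (an illustrative sketch)::

            resp.retry_after = 120                   # seconds from now
            resp.retry_after = datetime(2019, 1, 1)  # an absolute date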
        """
        value = self.headers.get("retry-after")
        if value is None:
            return
        elif value.isdigit():
            return datetime.utcnow() + timedelta(seconds=int(value))
        return parse_date(value)

    @retry_after.setter
    def retry_after(self, value):
        if value is None:
            if "retry-after" in self.headers:
                del self.headers["retry-after"]
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers["Retry-After"] = value

    def _set_property(name, doc=None):  # noqa: B902
        def fget(self):
            def on_update(header_set):
                if not header_set and name in self.headers:
                    del self.headers[name]
                elif header_set:
                    self.headers[name] = header_set.to_header()

            return parse_set_header(self.headers.get(name), on_update)

        def fset(self, value):
            if not value:
                del self.headers[name]
            elif isinstance(value, string_types):
                self.headers[name] = value
            else:
                self.headers[name] = dump_header(value)

        return property(fget, fset, doc=doc)

    vary = _set_property(
        "Vary",
        doc="""The Vary field value indicates the set of request-header
        fields that fully determines, while the response is fresh,
        whether a cache is permitted to use the response to reply to a
        subsequent request without revalidation.
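
        For example (an illustrative sketch)::

            resp.vary.add('Cookie')
            resp.headers['Vary']  # 'Cookie'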
        """,
    )
    content_language = _set_property(
        "Content-Language",
        doc="""The Content-Language entity-header field describes the
        natural language(s) of the intended audience for the enclosed
        entity. Note that this might not be equivalent to all the
        languages used within the entity-body.""",
    )
    allow = _set_property(
        "Allow",
        doc="""The Allow entity-header field lists the set of methods
        supported by the resource identified by the Request-URI. The
        purpose of this field is strictly to inform the recipient of
        valid methods associated with the resource. An Allow header
        field MUST be present in a 405 (Method Not Allowed)
        response.""",
    )

    del _set_property
304
python/werkzeug/wrappers/etag.py
Normal file
@@ -0,0 +1,304 @@
from .._compat import string_types
from .._internal import _get_environ
from ..datastructures import ContentRange
from ..datastructures import RequestCacheControl
from ..datastructures import ResponseCacheControl
from ..http import generate_etag
from ..http import http_date
from ..http import is_resource_modified
from ..http import parse_cache_control_header
from ..http import parse_content_range_header
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_range_header
from ..http import quote_etag
from ..http import unquote_etag
from ..utils import cached_property
from ..utils import header_property
from ..wrappers.base_response import _clean_accept_ranges
from ..wsgi import _RangeWrapper


class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object with
    a WSGI environment available as :attr:`~BaseRequest.environ`. This not
    only provides access to etags but also to the cache control header.
    """

    @cached_property
    def cache_control(self):
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.environ.get("HTTP_CACHE_CONTROL")
        return parse_cache_control_header(cache_control, None, RequestCacheControl)

    @cached_property
    def if_match(self):
        """An object containing all the etags in the `If-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get("HTTP_IF_MATCH"))

    @cached_property
    def if_none_match(self):
        """An object containing all the etags in the `If-None-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get("HTTP_IF_NONE_MATCH"))

    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        return parse_date(self.environ.get("HTTP_IF_MODIFIED_SINCE"))

    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        return parse_date(self.environ.get("HTTP_IF_UNMODIFIED_SINCE"))

    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        return parse_if_range_header(self.environ.get("HTTP_IF_RANGE"))

    @cached_property
    def range(self):
        """The parsed `Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.environ.get("HTTP_RANGE"))


class ETagResponseMixin(object):
    """Adds extra functionality to a response object for etag and cache
    handling. This mixin requires an object with at least a `headers`
    object that implements a dict-like interface similar to
    :class:`~werkzeug.datastructures.Headers`.

    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mix this class in before the response base class. The default
    response class does not do that.
    """

    @property
    def cache_control(self):
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """

        def on_update(cache_control):
            if not cache_control and "cache-control" in self.headers:
                del self.headers["cache-control"]
            elif cache_control:
                self.headers["Cache-Control"] = cache_control.to_header()

        return parse_cache_control_header(
            self.headers.get("cache-control"), on_update, ResponseCacheControl
        )

    def _wrap_response(self, start, length):
        """Wrap existing Response in case of Range Request context."""
        if self.status_code == 206:
            self.response = _RangeWrapper(self.response, start, length)

    def _is_range_request_processable(self, environ):
        """Return ``True`` if `Range` header is present and if underlying
        resource is considered unchanged when compared with `If-Range` header.
        """
        return (
            "HTTP_IF_RANGE" not in environ
            or not is_resource_modified(
                environ,
                self.headers.get("etag"),
                None,
                self.headers.get("last-modified"),
                ignore_if_range=False,
            )
        ) and "HTTP_RANGE" in environ

    def _process_range_request(self, environ, complete_length=None, accept_ranges=None):
        """Handle Range Request related headers (RFC7233). If `Accept-Ranges`
        header is valid, and Range Request is processable, we set the headers
        as described by the RFC, and wrap the underlying response in a
        RangeWrapper.

        Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.

        :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
                 if `Range` header could not be parsed or satisfied.
        """
        from ..exceptions import RequestedRangeNotSatisfiable

        if accept_ranges is None:
            return False
        self.headers["Accept-Ranges"] = accept_ranges
        if not self._is_range_request_processable(environ) or complete_length is None:
            return False
        parsed_range = parse_range_header(environ.get("HTTP_RANGE"))
        if parsed_range is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        range_tuple = parsed_range.range_for_length(complete_length)
        content_range_header = parsed_range.to_content_range_header(complete_length)
        if range_tuple is None or content_range_header is None:
            raise RequestedRangeNotSatisfiable(complete_length)
        content_length = range_tuple[1] - range_tuple[0]
        # Be sure not to send a 206 response
        # if the requested range is the full content.
        if content_length != complete_length:
            self.headers["Content-Length"] = content_length
            self.content_range = content_range_header
            self.status_code = 206
            self._wrap_response(range_tuple[0], content_length)
            return True
        return False

    def make_conditional(
        self, request_or_environ, accept_ranges=False, complete_length=None
    ):
        """Make the response conditional to the request. This method works
        best if an etag was defined for the response already. The `add_etag`
        method can be used to do that. If called without etag just the date
        header is set.

        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.

        For optimal performance when handling range requests, it's recommended
        that your response data object implements `seekable`, `seek` and `tell`
        methods as described by :py:class:`io.IOBase`. Objects returned by
        :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.

        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.

        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.
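
        For example (a sketch; ``f`` is a hypothetical open file object
        and ``file_size`` its size in bytes)::

            resp = Response(wrap_file(environ, f), direct_passthrough=True)
            resp.add_etag()
            return resp.make_conditional(
                environ, accept_ranges=True, complete_length=file_size
            )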
|
||||
|
||||
:param request_or_environ: a request object or WSGI environment to be
|
||||
used to make the response conditional
|
||||
against.
|
||||
:param accept_ranges: This parameter dictates the value of
|
||||
`Accept-Ranges` header. If ``False`` (default),
|
||||
the header is not set. If ``True``, it will be set
|
||||
to ``"bytes"``. If ``None``, it will be set to
|
||||
``"none"``. If it's a string, it will use this
|
||||
value.
|
||||
:param complete_length: Will be used only in valid Range Requests.
|
||||
It will set `Content-Range` complete length
|
||||
value and compute `Content-Length` real value.
|
||||
This parameter is mandatory for successful
|
||||
Range Requests completion.
|
||||
:raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
|
||||
if `Range` header could not be parsed or satisfied.
|
||||
"""
|
||||
environ = _get_environ(request_or_environ)
|
||||
if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
|
||||
# if the date is not in the headers, add it now. We however
|
||||
# will not override an already existing header. Unfortunately
|
||||
# this header will be overriden by many WSGI servers including
|
||||
# wsgiref.
|
||||
if "date" not in self.headers:
|
||||
self.headers["Date"] = http_date()
|
||||
accept_ranges = _clean_accept_ranges(accept_ranges)
|
||||
is206 = self._process_range_request(environ, complete_length, accept_ranges)
|
||||
if not is206 and not is_resource_modified(
|
||||
environ,
|
||||
self.headers.get("etag"),
|
||||
None,
|
||||
self.headers.get("last-modified"),
|
||||
):
|
||||
if parse_etags(environ.get("HTTP_IF_MATCH")):
|
||||
self.status_code = 412
|
||||
else:
|
||||
self.status_code = 304
|
||||
if (
|
||||
self.automatically_set_content_length
|
||||
and "content-length" not in self.headers
|
||||
):
|
||||
length = self.calculate_content_length()
|
||||
if length is not None:
|
||||
self.headers["Content-Length"] = length
|
||||
return self
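    # Usage sketch for ``make_conditional`` (assuming this package is
    # importable as ``werkzeug``): a fresh GET keeps the 200, while a
    # revalidation with a matching ``If-None-Match`` turns into a 304.
    #
    #   from werkzeug.test import create_environ
    #   from werkzeug.wrappers import Response
    #
    #   resp = Response("hello world")
    #   resp.add_etag()
    #   resp.make_conditional(create_environ("/"))
    #   assert resp.status_code == 200
    #
    #   etag, _ = resp.get_etag()
    #   revalidate = create_environ("/", headers={"If-None-Match": '"%s"' % etag})
    #   resp.make_conditional(revalidate)
    #   assert resp.status_code == 304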

    def add_etag(self, overwrite=False, weak=False):
        """Add an etag for the current response if there is none yet."""
        if overwrite or "etag" not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)

    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one."""
        self.headers["ETag"] = quote_etag(etag, weak)

    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``. If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get("ETag"))
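    # Sketch of the etag helpers (values are illustrative): ``set_etag`` with
    # ``weak=True`` produces the ``W/`` form, and ``get_etag`` reverses it.
    #
    #   from werkzeug.wrappers import Response
    #
    #   resp = Response("payload")
    #   resp.set_etag("v1", weak=True)
    #   assert resp.headers["ETag"] == 'W/"v1"'
    #   assert resp.get_etag() == ("v1", True)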

    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready for
        pickling. This buffers the generator if there is one. This also
        sets the etag unless `no_etag` is set to `True`.
        """
        if not no_etag:
            self.add_etag()
        super(ETagResponseMixin, self).freeze()
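    # Sketch of ``freeze`` (assuming a plain, picklable response): the
    # generator body is buffered and an etag added, so the object survives a
    # pickle round trip.
    #
    #   import pickle
    #   from werkzeug.wrappers import Response
    #
    #   resp = Response(chunk for chunk in (b"a", b"b"))
    #   resp.freeze()
    #   restored = pickle.loads(pickle.dumps(resp))
    #   assert restored.get_data() == b"ab"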

    accept_ranges = header_property(
        "Accept-Ranges",
        doc="""The `Accept-Ranges` header. Even though the name would
        indicate that multiple values are supported, it must be one
        string token only.

        The values ``'bytes'`` and ``'none'`` are common.

        .. versionadded:: 0.7""",
    )

    def _get_content_range(self):
        def on_update(rng):
            if not rng:
                del self.headers["content-range"]
            else:
                self.headers["Content-Range"] = rng.to_header()

        rv = parse_content_range_header(self.headers.get("content-range"), on_update)
        # always provide a content range object to make the descriptor
        # more user friendly. It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    def _set_content_range(self, value):
        if not value:
            del self.headers["content-range"]
        elif isinstance(value, string_types):
            self.headers["Content-Range"] = value
        else:
            self.headers["Content-Range"] = value.to_header()

    content_range = property(
        _get_content_range,
        _set_content_range,
        doc="""The ``Content-Range`` header as a
        :class:`~werkzeug.datastructures.ContentRange` object. Even if
        the header is not set it will provide such an object for easier
        manipulation.

        .. versionadded:: 0.7""",
    )
    del _get_content_range, _set_content_range
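    # Sketch of the range descriptors (a partial response for bytes 0-4 of a
    # 10-byte resource; values are illustrative):
    #
    #   from werkzeug.wrappers import Response
    #
    #   resp = Response(b"01234", status=206)
    #   resp.accept_ranges = "bytes"
    #   resp.content_range.set(0, 5, 10)
    #   assert resp.headers["Content-Range"] == "bytes 0-4/10"
    #   resp.content_range.unset()
    #   assert "Content-Range" not in resp.headers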
145
python/werkzeug/wrappers/json.py
Normal file
145
python/werkzeug/wrappers/json.py
Normal file
@@ -0,0 +1,145 @@
from __future__ import absolute_import

import datetime
import uuid

from .._compat import text_type
from ..exceptions import BadRequest
from ..utils import detect_utf_encoding

try:
    import simplejson as _json
except ImportError:
    import json as _json


class _JSONModule(object):
    @staticmethod
    def _default(o):
        if isinstance(o, datetime.date):
            return o.isoformat()

        if isinstance(o, uuid.UUID):
            return str(o)

        if hasattr(o, "__html__"):
            return text_type(o.__html__())

        raise TypeError()

    @classmethod
    def dumps(cls, obj, **kw):
        kw.setdefault("separators", (",", ":"))
        kw.setdefault("default", cls._default)
        kw.setdefault("sort_keys", True)
        return _json.dumps(obj, **kw)

    @staticmethod
    def loads(s, **kw):
        if isinstance(s, bytes):
            # Needed for Python < 3.6
            encoding = detect_utf_encoding(s)
            s = s.decode(encoding)

        return _json.loads(s, **kw)
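# Sketch of the defaults ``_JSONModule.dumps`` applies: compact separators,
# sorted keys, and the ``_default`` fallback for dates and UUIDs.
#
#   import datetime
#   import uuid
#
#   doc = {"when": datetime.date(2019, 5, 15), "id": uuid.UUID(int=1)}
#   _JSONModule.dumps(doc)
#   # '{"id":"00000000-0000-0000-0000-000000000001","when":"2019-05-15"}'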


class JSONMixin(object):
    """Mixin to parse :attr:`data` as JSON. Can be mixed in for both
    :class:`~werkzeug.wrappers.Request` and
    :class:`~werkzeug.wrappers.Response` classes.

    If `simplejson`_ is installed, it is preferred over Python's built-in
    :mod:`json` module.

    .. _simplejson: https://simplejson.readthedocs.io/en/latest/
    """

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = _JSONModule

    @property
    def json(self):
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :meth:`is_json`).

        Calls :meth:`get_json` with default arguments.
        """
        return self.get_json()

    @property
    def is_json(self):
        """Check if the mimetype indicates JSON data, either
        :mimetype:`application/json` or :mimetype:`application/*+json`.
        """
        mt = self.mimetype
        return (
            mt == "application/json"
            or mt.startswith("application/")
            and mt.endswith("+json")
        )
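    # Sketch of ``is_json``: the stock ``Request`` does not include this
    # mixin, so a hypothetical subclass is built here; note the suffix form
    # matches any ``application/*+json`` mimetype.
    #
    #   from werkzeug.test import create_environ
    #   from werkzeug.wrappers import Request
    #
    #   class JSONRequest(JSONMixin, Request):
    #       pass
    #
    #   env = create_environ(data='{"a": 1}', content_type="application/vnd.api+json")
    #   req = JSONRequest(env)
    #   assert req.is_json
    #   assert req.json == {"a": 1}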

    def _get_data_for_json(self, cache):
        try:
            return self.get_data(cache=cache)
        except TypeError:
            # Response doesn't have cache param.
            return self.get_data()

    # Cached values for ``(silent=False, silent=True)``. Initialized
    # with sentinel values.
    _cached_json = (Ellipsis, Ellipsis)

    def get_json(self, force=False, silent=False, cache=True):
        """Parse :attr:`data` as JSON.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :meth:`is_json`), this
        returns ``None``.

        If parsing fails, :meth:`on_json_loading_failed` is called and
        its return value is used as the return value.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence parsing errors and return ``None``
            instead.
        :param cache: Store the parsed JSON to return for subsequent
            calls.
        """
        if cache and self._cached_json[silent] is not Ellipsis:
            return self._cached_json[silent]

        if not (force or self.is_json):
            return None

        data = self._get_data_for_json(cache=cache)

        try:
            rv = self.json_module.loads(data)
        except ValueError as e:
            if silent:
                rv = None

                if cache:
                    normal_rv, _ = self._cached_json
                    self._cached_json = (normal_rv, rv)
            else:
                rv = self.on_json_loading_failed(e)

                if cache:
                    _, silent_rv = self._cached_json
                    self._cached_json = (rv, silent_rv)
        else:
            if cache:
                self._cached_json = (rv, rv)

        return rv
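    # Sketch of the failure paths, reusing the hypothetical ``JSONRequest``
    # from the comment above: ``silent=True`` swallows the error, while the
    # default path raises ``BadRequest``.
    #
    #   from werkzeug.exceptions import BadRequest
    #
    #   bad = JSONRequest(
    #       create_environ(data="not json", content_type="application/json")
    #   )
    #   assert bad.get_json(silent=True) is None
    #   try:
    #       bad.get_json()
    #   except BadRequest:
    #       pass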

    def on_json_loading_failed(self, e):
        """Called if :meth:`get_json` parsing fails and isn't silenced.
        If this method returns a value, it is used as the return value
        for :meth:`get_json`. The default implementation raises
        :exc:`~werkzeug.exceptions.BadRequest`.
        """
        raise BadRequest("Failed to decode JSON object: {0}".format(e))
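# Sketch of overriding the failure hook in a hypothetical lenient subclass
# (imports as in the sketches above): whatever it returns becomes the result
# of ``get_json``.
#
#   class LenientJSONRequest(JSONMixin, Request):
#       def on_json_loading_failed(self, e):
#           return {}
#
#   req = LenientJSONRequest(
#       create_environ(data="not json", content_type="application/json")
#   )
#   assert req.get_json() == {}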
44
python/werkzeug/wrappers/request.py
Normal file
44
python/werkzeug/wrappers/request.py
Normal file
@@ -0,0 +1,44 @@
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .base_request import BaseRequest
from .common_descriptors import CommonRequestDescriptorsMixin
from .etag import ETagRequestMixin
from .user_agent import UserAgentMixin


class Request(
    BaseRequest,
    AcceptMixin,
    ETagRequestMixin,
    UserAgentMixin,
    AuthorizationMixin,
    CommonRequestDescriptorsMixin,
):
    """Full featured request object implementing the following mixins:

    - :class:`AcceptMixin` for accept header parsing
    - :class:`ETagRequestMixin` for etag and cache control handling
    - :class:`UserAgentMixin` for user agent introspection
    - :class:`AuthorizationMixin` for http auth handling
    - :class:`CommonRequestDescriptorsMixin` for common headers
    """


class StreamOnlyMixin(object):
    """If mixed in before the request object, this will change its
    behavior to disable handling of form parsing. This disables the
    :attr:`files` and :attr:`form` attributes and will just provide a
    :attr:`stream` attribute that, however, is always available.

    .. versionadded:: 0.9
    """

    disable_data_descriptor = True
    want_form_data_parsed = False


class PlainRequest(StreamOnlyMixin, Request):
    """A request object without special form parsing capabilities.

    .. versionadded:: 0.9
    """
78
python/werkzeug/wrappers/response.py
Normal file
78
python/werkzeug/wrappers/response.py
Normal file
@@ -0,0 +1,78 @@
from ..utils import cached_property
from .auth import WWWAuthenticateMixin
from .base_response import BaseResponse
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagResponseMixin


class ResponseStream(object):
    """A file-descriptor-like object used by the :class:`ResponseStreamMixin`
    to represent the body of the stream. It pushes directly into the response
    iterable of the response object.
    """

    mode = "wb+"

    def __init__(self, response):
        self.response = response
        self.closed = False

    def write(self, value):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)
        self.response.headers.pop("Content-Length", None)
        return len(value)

    def writelines(self, seq):
        for item in seq:
            self.write(item)

    def close(self):
        self.closed = True

    def flush(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def isatty(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return False

    def tell(self):
        self.response._ensure_sequence()
        return sum(map(len, self.response.response))

    @property
    def encoding(self):
        return self.response.charset


class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses. Classes that inherit from
    this mixin will automatically get a :attr:`stream` property that provides
    a write-only interface to the response iterable.
    """

    @cached_property
    def stream(self):
        """The response iterable as a write-only stream."""
        return ResponseStream(self)


class Response(
    BaseResponse,
    ETagResponseMixin,
    ResponseStreamMixin,
    CommonResponseDescriptorsMixin,
    WWWAuthenticateMixin,
):
    """Full featured response object implementing the following mixins:

    - :class:`ETagResponseMixin` for etag and cache control handling
    - :class:`ResponseStreamMixin` to add support for the `stream` property
    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
    - :class:`WWWAuthenticateMixin` for HTTP authentication support
    """
15
python/werkzeug/wrappers/user_agent.py
Normal file
15
python/werkzeug/wrappers/user_agent.py
Normal file
@@ -0,0 +1,15 @@
from ..utils import cached_property


class UserAgentMixin(object):
    """Adds a `user_agent` attribute to the request object which
    contains the parsed user agent of the browser that triggered the
    request as a :class:`~werkzeug.useragents.UserAgent` object.
    """

    @cached_property
    def user_agent(self):
        """The current user agent."""
        from ..useragents import UserAgent

        return UserAgent(self.environ)
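# Usage sketch (header value is illustrative; the parsed fields depend on
# werkzeug.useragents):
#
#   from werkzeug.test import create_environ
#   from werkzeug.wrappers import Request
#
#   ua_string = "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0"
#   env = create_environ(headers={"User-Agent": ua_string})
#   ua = Request(env).user_agent
#   ua.browser, ua.version, ua.platform  # ('firefox', '66.0', 'linux')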
1067
python/werkzeug/wsgi.py
Normal file
1067
python/werkzeug/wsgi.py
Normal file
File diff suppressed because it is too large