track embedded python distribution
This commit is contained in:
6
.gitignore
vendored
6
.gitignore
vendored
@@ -1,12 +1,6 @@
|
|||||||
__pycache__/
|
__pycache__/
|
||||||
*.py[cod]
|
*.py[cod]
|
||||||
*$py.class
|
*$py.class
|
||||||
youtube_dl_old/
|
|
||||||
python/
|
|
||||||
gevent/
|
|
||||||
debug/
|
debug/
|
||||||
data/
|
data/
|
||||||
banned_addresses.txt
|
banned_addresses.txt
|
||||||
youtube/common_old.py
|
|
||||||
youtube/common_older.py
|
|
||||||
youtube/watch_old.py
|
|
||||||
|
|||||||
56
python/brotli.py
Normal file
56
python/brotli.py
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
# Copyright 2016 The Brotli Authors. All rights reserved.
|
||||||
|
#
|
||||||
|
# Distributed under MIT license.
|
||||||
|
# See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
|
||||||
|
|
||||||
|
"""Functions to compress and decompress data using the Brotli library."""
|
||||||
|
|
||||||
|
import _brotli
|
||||||
|
|
||||||
|
|
||||||
|
# The library version.
|
||||||
|
__version__ = _brotli.__version__
|
||||||
|
|
||||||
|
# The compression mode.
|
||||||
|
MODE_GENERIC = _brotli.MODE_GENERIC
|
||||||
|
MODE_TEXT = _brotli.MODE_TEXT
|
||||||
|
MODE_FONT = _brotli.MODE_FONT
|
||||||
|
|
||||||
|
# The Compressor object.
|
||||||
|
Compressor = _brotli.Compressor
|
||||||
|
|
||||||
|
# The Decompressor object.
|
||||||
|
Decompressor = _brotli.Decompressor
|
||||||
|
|
||||||
|
# Compress a byte string.
|
||||||
|
def compress(string, mode=MODE_GENERIC, quality=11, lgwin=22, lgblock=0):
|
||||||
|
"""Compress a byte string.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
string (bytes): The input data.
|
||||||
|
mode (int, optional): The compression mode can be MODE_GENERIC (default),
|
||||||
|
MODE_TEXT (for UTF-8 format text input) or MODE_FONT (for WOFF 2.0).
|
||||||
|
quality (int, optional): Controls the compression-speed vs compression-
|
||||||
|
density tradeoff. The higher the quality, the slower the compression.
|
||||||
|
Range is 0 to 11. Defaults to 11.
|
||||||
|
lgwin (int, optional): Base 2 logarithm of the sliding window size. Range
|
||||||
|
is 10 to 24. Defaults to 22.
|
||||||
|
lgblock (int, optional): Base 2 logarithm of the maximum input block size.
|
||||||
|
Range is 16 to 24. If set to 0, the value will be set based on the
|
||||||
|
quality. Defaults to 0.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The compressed byte string.
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
brotli.error: If arguments are invalid, or compressor fails.
|
||||||
|
"""
|
||||||
|
compressor = Compressor(mode=mode, quality=quality, lgwin=lgwin,
|
||||||
|
lgblock=lgblock)
|
||||||
|
return compressor.process(string) + compressor.finish()
|
||||||
|
|
||||||
|
# Decompress a compressed byte string.
|
||||||
|
decompress = _brotli.decompress
|
||||||
|
|
||||||
|
# Raised if compression or decompression fails.
|
||||||
|
error = _brotli.error
|
||||||
136
python/gevent/__init__.py
Normal file
136
python/gevent/__init__.py
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
gevent is a coroutine-based Python networking library that uses greenlet
|
||||||
|
to provide a high-level synchronous API on top of libev event loop.
|
||||||
|
|
||||||
|
See http://www.gevent.org/ for the documentation.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
from collections import namedtuple
|
||||||
|
|
||||||
|
_version_info = namedtuple('version_info',
|
||||||
|
('major', 'minor', 'micro', 'releaselevel', 'serial'))
|
||||||
|
|
||||||
|
#: The programatic version identifier. The fields have (roughly) the
|
||||||
|
#: same meaning as :data:`sys.version_info`
|
||||||
|
#: Deprecated in 1.2.
|
||||||
|
version_info = _version_info(1, 2, 2, 'dev', 0)
|
||||||
|
|
||||||
|
#: The human-readable PEP 440 version identifier
|
||||||
|
__version__ = '1.2.2'
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['get_hub',
|
||||||
|
'Greenlet',
|
||||||
|
'GreenletExit',
|
||||||
|
'spawn',
|
||||||
|
'spawn_later',
|
||||||
|
'spawn_raw',
|
||||||
|
'iwait',
|
||||||
|
'wait',
|
||||||
|
'killall',
|
||||||
|
'Timeout',
|
||||||
|
'with_timeout',
|
||||||
|
'getcurrent',
|
||||||
|
'sleep',
|
||||||
|
'idle',
|
||||||
|
'kill',
|
||||||
|
'signal',
|
||||||
|
'fork',
|
||||||
|
'reinit']
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
if sys.platform == 'win32':
|
||||||
|
# trigger WSAStartup call
|
||||||
|
import socket # pylint:disable=unused-import,useless-suppression
|
||||||
|
del socket
|
||||||
|
|
||||||
|
from gevent.hub import get_hub, iwait, wait
|
||||||
|
from gevent.greenlet import Greenlet, joinall, killall
|
||||||
|
joinall = joinall # export for pylint
|
||||||
|
spawn = Greenlet.spawn
|
||||||
|
spawn_later = Greenlet.spawn_later
|
||||||
|
|
||||||
|
from gevent.timeout import Timeout, with_timeout
|
||||||
|
from gevent.hub import getcurrent, GreenletExit, spawn_raw, sleep, idle, kill, reinit
|
||||||
|
try:
|
||||||
|
from gevent.os import fork
|
||||||
|
except ImportError:
|
||||||
|
__all__.remove('fork')
|
||||||
|
|
||||||
|
# See https://github.com/gevent/gevent/issues/648
|
||||||
|
# A temporary backwards compatibility shim to enable users to continue
|
||||||
|
# to treat 'from gevent import signal' as a callable, to matter whether
|
||||||
|
# the 'gevent.signal' module has been imported first
|
||||||
|
from gevent.hub import signal as _signal_class
|
||||||
|
from gevent import signal as _signal_module
|
||||||
|
|
||||||
|
# The object 'gevent.signal' must:
|
||||||
|
# - be callable, returning a gevent.hub.signal;
|
||||||
|
# - answer True to isinstance(gevent.signal(...), gevent.signal);
|
||||||
|
# - answer True to isinstance(gevent.signal(...), gevent.hub.signal)
|
||||||
|
# - have all the attributes of the module 'gevent.signal';
|
||||||
|
# - answer True to isinstance(gevent.signal, types.ModuleType) (optional)
|
||||||
|
|
||||||
|
# The only way to do this is to use a metaclass, an instance of which (a class)
|
||||||
|
# is put in sys.modules and is substituted for gevent.hub.signal.
|
||||||
|
# This handles everything except the last one.
|
||||||
|
|
||||||
|
|
||||||
|
class _signal_metaclass(type):
|
||||||
|
|
||||||
|
def __getattr__(cls, name):
|
||||||
|
return getattr(_signal_module, name)
|
||||||
|
|
||||||
|
def __setattr__(cls, name, value):
|
||||||
|
setattr(_signal_module, name, value)
|
||||||
|
|
||||||
|
def __instancecheck__(cls, instance):
|
||||||
|
return isinstance(instance, _signal_class)
|
||||||
|
|
||||||
|
def __dir__(cls):
|
||||||
|
return dir(_signal_module)
|
||||||
|
|
||||||
|
|
||||||
|
class signal(object):
|
||||||
|
|
||||||
|
__doc__ = _signal_module.__doc__
|
||||||
|
|
||||||
|
def __new__(cls, *args, **kwargs):
|
||||||
|
return _signal_class(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# The metaclass is applied after the class declaration
|
||||||
|
# for Python 2/3 compatibility
|
||||||
|
signal = _signal_metaclass(str("signal"),
|
||||||
|
(),
|
||||||
|
dict(signal.__dict__))
|
||||||
|
|
||||||
|
sys.modules['gevent.signal'] = signal
|
||||||
|
sys.modules['gevent.hub'].signal = signal
|
||||||
|
|
||||||
|
del sys
|
||||||
|
|
||||||
|
|
||||||
|
# the following makes hidden imports visible to freezing tools like
|
||||||
|
# py2exe. see https://github.com/gevent/gevent/issues/181
|
||||||
|
|
||||||
|
def __dependencies_for_freezing():
|
||||||
|
# pylint:disable=unused-variable
|
||||||
|
from gevent import core
|
||||||
|
from gevent import resolver_thread
|
||||||
|
from gevent import resolver_ares
|
||||||
|
from gevent import socket as _socket
|
||||||
|
from gevent import threadpool
|
||||||
|
from gevent import thread
|
||||||
|
from gevent import threading
|
||||||
|
from gevent import select
|
||||||
|
from gevent import subprocess
|
||||||
|
import pprint
|
||||||
|
import traceback
|
||||||
|
import signal as _signal
|
||||||
|
|
||||||
|
del __dependencies_for_freezing
|
||||||
48
python/gevent/_compat.py
Normal file
48
python/gevent/_compat.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
internal gevent python 2/python 3 bridges. Not for external use.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import print_function, absolute_import, division
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
PY2 = sys.version_info[0] == 2
|
||||||
|
PY3 = sys.version_info[0] >= 3
|
||||||
|
PYPY = hasattr(sys, 'pypy_version_info')
|
||||||
|
|
||||||
|
## Types
|
||||||
|
|
||||||
|
if PY3:
|
||||||
|
string_types = (str,)
|
||||||
|
integer_types = (int,)
|
||||||
|
text_type = str
|
||||||
|
|
||||||
|
else:
|
||||||
|
import __builtin__ # pylint:disable=import-error
|
||||||
|
string_types = __builtin__.basestring,
|
||||||
|
text_type = __builtin__.unicode
|
||||||
|
integer_types = (int, __builtin__.long)
|
||||||
|
|
||||||
|
|
||||||
|
## Exceptions
|
||||||
|
if PY3:
|
||||||
|
def reraise(t, value, tb=None): # pylint:disable=unused-argument
|
||||||
|
if value.__traceback__ is not tb and tb is not None:
|
||||||
|
raise value.with_traceback(tb)
|
||||||
|
raise value
|
||||||
|
|
||||||
|
else:
|
||||||
|
from gevent._util_py2 import reraise # pylint:disable=import-error,no-name-in-module
|
||||||
|
reraise = reraise # export
|
||||||
|
|
||||||
|
## Functions
|
||||||
|
if PY3:
|
||||||
|
iteritems = dict.items
|
||||||
|
itervalues = dict.values
|
||||||
|
xrange = range
|
||||||
|
else:
|
||||||
|
iteritems = dict.iteritems # python 3: pylint:disable=no-member
|
||||||
|
itervalues = dict.itervalues # python 3: pylint:disable=no-member
|
||||||
|
xrange = __builtin__.xrange
|
||||||
128
python/gevent/_fileobjectcommon.py
Normal file
128
python/gevent/_fileobjectcommon.py
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
|
||||||
|
try:
|
||||||
|
from errno import EBADF
|
||||||
|
except ImportError:
|
||||||
|
EBADF = 9
|
||||||
|
|
||||||
|
from io import TextIOWrapper
|
||||||
|
|
||||||
|
class cancel_wait_ex(IOError):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super(cancel_wait_ex, self).__init__(
|
||||||
|
EBADF, 'File descriptor was closed in another greenlet')
|
||||||
|
|
||||||
|
|
||||||
|
class FileObjectClosed(IOError):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super(FileObjectClosed, self).__init__(
|
||||||
|
EBADF, 'Bad file descriptor (FileObject was closed)')
|
||||||
|
|
||||||
|
class FileObjectBase(object):
|
||||||
|
"""
|
||||||
|
Internal base class to ensure a level of consistency
|
||||||
|
between FileObjectPosix and FileObjectThread
|
||||||
|
"""
|
||||||
|
|
||||||
|
# List of methods we delegate to the wrapping IO object, if they
|
||||||
|
# implement them and we do not.
|
||||||
|
_delegate_methods = (
|
||||||
|
# General methods
|
||||||
|
'flush',
|
||||||
|
'fileno',
|
||||||
|
'writable',
|
||||||
|
'readable',
|
||||||
|
'seek',
|
||||||
|
'seekable',
|
||||||
|
'tell',
|
||||||
|
|
||||||
|
# Read
|
||||||
|
'read',
|
||||||
|
'readline',
|
||||||
|
'readlines',
|
||||||
|
'read1',
|
||||||
|
|
||||||
|
# Write
|
||||||
|
'write',
|
||||||
|
'writelines',
|
||||||
|
'truncate',
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Whether we are translating universal newlines or not.
|
||||||
|
_translate = False
|
||||||
|
|
||||||
|
def __init__(self, io, closefd):
|
||||||
|
"""
|
||||||
|
:param io: An io.IOBase-like object.
|
||||||
|
"""
|
||||||
|
self._io = io
|
||||||
|
# We don't actually use this property ourself, but we save it (and
|
||||||
|
# pass it along) for compatibility.
|
||||||
|
self._close = closefd
|
||||||
|
|
||||||
|
if self._translate:
|
||||||
|
# This automatically handles delegation.
|
||||||
|
self.translate_newlines(None)
|
||||||
|
else:
|
||||||
|
self._do_delegate_methods()
|
||||||
|
|
||||||
|
|
||||||
|
io = property(lambda s: s._io,
|
||||||
|
# Historically we either hand-wrote all the delegation methods
|
||||||
|
# to use self.io, or we simply used __getattr__ to look them up at
|
||||||
|
# runtime. This meant people could change the io attribute on the fly
|
||||||
|
# and it would mostly work (subprocess.py used to do that). We don't recommend
|
||||||
|
# that, but we still support it.
|
||||||
|
lambda s, nv: setattr(s, '_io', nv) or s._do_delegate_methods())
|
||||||
|
|
||||||
|
def _do_delegate_methods(self):
|
||||||
|
for meth_name in self._delegate_methods:
|
||||||
|
meth = getattr(self._io, meth_name, None)
|
||||||
|
implemented_by_class = hasattr(type(self), meth_name)
|
||||||
|
if meth and not implemented_by_class:
|
||||||
|
setattr(self, meth_name, self._wrap_method(meth))
|
||||||
|
elif hasattr(self, meth_name) and not implemented_by_class:
|
||||||
|
delattr(self, meth_name)
|
||||||
|
|
||||||
|
def _wrap_method(self, method):
|
||||||
|
"""
|
||||||
|
Wrap a method we're copying into our dictionary from the underlying
|
||||||
|
io object to do something special or different, if necessary.
|
||||||
|
"""
|
||||||
|
return method
|
||||||
|
|
||||||
|
def translate_newlines(self, mode, *text_args, **text_kwargs):
|
||||||
|
wrapper = TextIOWrapper(self._io, *text_args, **text_kwargs)
|
||||||
|
if mode:
|
||||||
|
wrapper.mode = mode
|
||||||
|
self.io = wrapper
|
||||||
|
self._translate = True
|
||||||
|
|
||||||
|
@property
|
||||||
|
def closed(self):
|
||||||
|
"""True if the file is closed"""
|
||||||
|
return self._io is None
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
if self._io is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
io = self._io
|
||||||
|
self._io = None
|
||||||
|
self._do_close(io, self._close)
|
||||||
|
|
||||||
|
def _do_close(self, fobj, closefd):
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
def __getattr__(self, name):
|
||||||
|
if self._io is None:
|
||||||
|
raise FileObjectClosed()
|
||||||
|
return getattr(self._io, name)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return '<%s _fobj=%r%s>' % (self.__class__.__name__, self.io, self._extra_repr())
|
||||||
|
|
||||||
|
def _extra_repr(self):
|
||||||
|
return ''
|
||||||
285
python/gevent/_fileobjectposix.py
Normal file
285
python/gevent/_fileobjectposix.py
Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
|
import os
|
||||||
|
import io
|
||||||
|
from io import BufferedReader
|
||||||
|
from io import BufferedWriter
|
||||||
|
from io import BytesIO
|
||||||
|
from io import DEFAULT_BUFFER_SIZE
|
||||||
|
from io import RawIOBase
|
||||||
|
from io import UnsupportedOperation
|
||||||
|
|
||||||
|
from gevent._fileobjectcommon import cancel_wait_ex
|
||||||
|
from gevent._fileobjectcommon import FileObjectBase
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent.os import _read
|
||||||
|
from gevent.os import _write
|
||||||
|
from gevent.os import ignored_errors
|
||||||
|
from gevent.os import make_nonblocking
|
||||||
|
|
||||||
|
|
||||||
|
class GreenFileDescriptorIO(RawIOBase):
|
||||||
|
|
||||||
|
# Note that RawIOBase has a __del__ method that calls
|
||||||
|
# self.close(). (In C implementations like CPython, this is
|
||||||
|
# the type's tp_dealloc slot; prior to Python 3, the object doesn't
|
||||||
|
# appear to have a __del__ method, even though it functionally does)
|
||||||
|
|
||||||
|
_read_event = None
|
||||||
|
_write_event = None
|
||||||
|
|
||||||
|
def __init__(self, fileno, mode='r', closefd=True):
|
||||||
|
RawIOBase.__init__(self) # Python 2: pylint:disable=no-member,non-parent-init-called
|
||||||
|
self._closed = False
|
||||||
|
self._closefd = closefd
|
||||||
|
self._fileno = fileno
|
||||||
|
make_nonblocking(fileno)
|
||||||
|
self._readable = 'r' in mode
|
||||||
|
self._writable = 'w' in mode
|
||||||
|
self.hub = get_hub()
|
||||||
|
|
||||||
|
io_watcher = self.hub.loop.io
|
||||||
|
if self._readable:
|
||||||
|
self._read_event = io_watcher(fileno, 1)
|
||||||
|
|
||||||
|
if self._writable:
|
||||||
|
self._write_event = io_watcher(fileno, 2)
|
||||||
|
|
||||||
|
self._seekable = None
|
||||||
|
|
||||||
|
def readable(self):
|
||||||
|
return self._readable
|
||||||
|
|
||||||
|
def writable(self):
|
||||||
|
return self._writable
|
||||||
|
|
||||||
|
def seekable(self):
|
||||||
|
if self._seekable is None:
|
||||||
|
try:
|
||||||
|
os.lseek(self._fileno, 0, os.SEEK_CUR)
|
||||||
|
except OSError:
|
||||||
|
self._seekable = False
|
||||||
|
else:
|
||||||
|
self._seekable = True
|
||||||
|
return self._seekable
|
||||||
|
|
||||||
|
def fileno(self):
|
||||||
|
return self._fileno
|
||||||
|
|
||||||
|
@property
|
||||||
|
def closed(self):
|
||||||
|
return self._closed
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
if self._closed:
|
||||||
|
return
|
||||||
|
self.flush()
|
||||||
|
self._closed = True
|
||||||
|
if self._readable:
|
||||||
|
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
|
||||||
|
if self._writable:
|
||||||
|
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
|
||||||
|
fileno = self._fileno
|
||||||
|
if self._closefd:
|
||||||
|
self._fileno = None
|
||||||
|
os.close(fileno)
|
||||||
|
|
||||||
|
# RawIOBase provides a 'read' method that will call readall() if
|
||||||
|
# the `size` was missing or -1 and otherwise call readinto(). We
|
||||||
|
# want to take advantage of this to avoid single byte reads when
|
||||||
|
# possible. This is highlighted by a bug in BufferedIOReader that
|
||||||
|
# calls read() in a loop when its readall() method is invoked;
|
||||||
|
# this was fixed in Python 3.3. See
|
||||||
|
# https://github.com/gevent/gevent/issues/675)
|
||||||
|
def __read(self, n):
|
||||||
|
if not self._readable:
|
||||||
|
raise UnsupportedOperation('read')
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return _read(self._fileno, n)
|
||||||
|
except (IOError, OSError) as ex:
|
||||||
|
if ex.args[0] not in ignored_errors:
|
||||||
|
raise
|
||||||
|
self.hub.wait(self._read_event)
|
||||||
|
|
||||||
|
def readall(self):
|
||||||
|
ret = BytesIO()
|
||||||
|
while True:
|
||||||
|
data = self.__read(DEFAULT_BUFFER_SIZE)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
ret.write(data)
|
||||||
|
return ret.getvalue()
|
||||||
|
|
||||||
|
def readinto(self, b):
|
||||||
|
data = self.__read(len(b))
|
||||||
|
n = len(data)
|
||||||
|
try:
|
||||||
|
b[:n] = data
|
||||||
|
except TypeError as err:
|
||||||
|
import array
|
||||||
|
if not isinstance(b, array.array):
|
||||||
|
raise err
|
||||||
|
b[:n] = array.array(b'b', data)
|
||||||
|
return n
|
||||||
|
|
||||||
|
def write(self, b):
|
||||||
|
if not self._writable:
|
||||||
|
raise UnsupportedOperation('write')
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return _write(self._fileno, b)
|
||||||
|
except (IOError, OSError) as ex:
|
||||||
|
if ex.args[0] not in ignored_errors:
|
||||||
|
raise
|
||||||
|
self.hub.wait(self._write_event)
|
||||||
|
|
||||||
|
def seek(self, offset, whence=0):
|
||||||
|
return os.lseek(self._fileno, offset, whence)
|
||||||
|
|
||||||
|
class FlushingBufferedWriter(BufferedWriter):
|
||||||
|
|
||||||
|
def write(self, b):
|
||||||
|
ret = BufferedWriter.write(self, b)
|
||||||
|
self.flush()
|
||||||
|
return ret
|
||||||
|
|
||||||
|
class FileObjectPosix(FileObjectBase):
|
||||||
|
"""
|
||||||
|
A file-like object that operates on non-blocking files but
|
||||||
|
provides a synchronous, cooperative interface.
|
||||||
|
|
||||||
|
.. caution::
|
||||||
|
This object is only effective wrapping files that can be used meaningfully
|
||||||
|
with :func:`select.select` such as sockets and pipes.
|
||||||
|
|
||||||
|
In general, on most platforms, operations on regular files
|
||||||
|
(e.g., ``open('a_file.txt')``) are considered non-blocking
|
||||||
|
already, even though they can take some time to complete as
|
||||||
|
data is copied to the kernel and flushed to disk: this time
|
||||||
|
is relatively bounded compared to sockets or pipes, though.
|
||||||
|
A :func:`~os.read` or :func:`~os.write` call on such a file
|
||||||
|
will still effectively block for some small period of time.
|
||||||
|
Therefore, wrapping this class around a regular file is
|
||||||
|
unlikely to make IO gevent-friendly: reading or writing large
|
||||||
|
amounts of data could still block the event loop.
|
||||||
|
|
||||||
|
If you'll be working with regular files and doing IO in large
|
||||||
|
chunks, you may consider using
|
||||||
|
:class:`~gevent.fileobject.FileObjectThread` or
|
||||||
|
:func:`~gevent.os.tp_read` and :func:`~gevent.os.tp_write` to bypass this
|
||||||
|
concern.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
Random read/write (e.g., ``mode='rwb'``) is not supported.
|
||||||
|
For that, use :class:`io.BufferedRWPair` around two instance of this
|
||||||
|
class.
|
||||||
|
|
||||||
|
.. tip::
|
||||||
|
Although this object provides a :meth:`fileno` method and so
|
||||||
|
can itself be passed to :func:`fcntl.fcntl`, setting the
|
||||||
|
:data:`os.O_NONBLOCK` flag will have no effect (reads will
|
||||||
|
still block the greenlet, although other greenlets can run).
|
||||||
|
However, removing that flag *will cause this object to no
|
||||||
|
longer be cooperative* (other greenlets will no longer run).
|
||||||
|
|
||||||
|
You can use the internal ``fileio`` attribute of this object
|
||||||
|
(a :class:`io.RawIOBase`) to perform non-blocking byte reads.
|
||||||
|
Note, however, that once you begin directly using this
|
||||||
|
attribute, the results from using methods of *this* object
|
||||||
|
are undefined, especially in text mode. (See :issue:`222`.)
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1
|
||||||
|
Now uses the :mod:`io` package internally. Under Python 2, previously
|
||||||
|
used the undocumented class :class:`socket._fileobject`. This provides
|
||||||
|
better file-like semantics (and portability to Python 3).
|
||||||
|
.. versionchanged:: 1.2a1
|
||||||
|
Document the ``fileio`` attribute for non-blocking reads.
|
||||||
|
"""
|
||||||
|
|
||||||
|
#: platform specific default for the *bufsize* parameter
|
||||||
|
default_bufsize = io.DEFAULT_BUFFER_SIZE
|
||||||
|
|
||||||
|
def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
|
||||||
|
"""
|
||||||
|
:param fobj: Either an integer fileno, or an object supporting the
|
||||||
|
usual :meth:`socket.fileno` method. The file *will* be
|
||||||
|
put in non-blocking mode using :func:`gevent.os.make_nonblocking`.
|
||||||
|
:keyword str mode: The manner of access to the file, one of "rb", "rU" or "wb"
|
||||||
|
(where the "b" or "U" can be omitted).
|
||||||
|
If "U" is part of the mode, IO will be done on text, otherwise bytes.
|
||||||
|
:keyword int bufsize: If given, the size of the buffer to use. The default
|
||||||
|
value means to use a platform-specific default
|
||||||
|
Other values are interpreted as for the :mod:`io` package.
|
||||||
|
Buffering is ignored in text mode.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.2a1
|
||||||
|
|
||||||
|
A bufsize of 0 in write mode is no longer forced to be 1.
|
||||||
|
Instead, the underlying buffer is flushed after every write
|
||||||
|
operation to simulate a bufsize of 0. In gevent 1.0, a
|
||||||
|
bufsize of 0 was flushed when a newline was written, while
|
||||||
|
in gevent 1.1 it was flushed when more than one byte was
|
||||||
|
written. Note that this may have performance impacts.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if isinstance(fobj, int):
|
||||||
|
fileno = fobj
|
||||||
|
fobj = None
|
||||||
|
else:
|
||||||
|
fileno = fobj.fileno()
|
||||||
|
if not isinstance(fileno, int):
|
||||||
|
raise TypeError('fileno must be int: %r' % fileno)
|
||||||
|
|
||||||
|
orig_mode = mode
|
||||||
|
mode = (mode or 'rb').replace('b', '')
|
||||||
|
if 'U' in mode:
|
||||||
|
self._translate = True
|
||||||
|
mode = mode.replace('U', '')
|
||||||
|
else:
|
||||||
|
self._translate = False
|
||||||
|
|
||||||
|
if len(mode) != 1 and mode not in 'rw': # pragma: no cover
|
||||||
|
# Python 3 builtin `open` raises a ValueError for invalid modes;
|
||||||
|
# Python 2 ignores it. In the past, we raised an AssertionError, if __debug__ was
|
||||||
|
# enabled (which it usually was). Match Python 3 because it makes more sense
|
||||||
|
# and because __debug__ may not be enabled.
|
||||||
|
# NOTE: This is preventing a mode like 'rwb' for binary random access;
|
||||||
|
# that code was never tested and was explicitly marked as "not used"
|
||||||
|
raise ValueError('mode can only be [rb, rU, wb], not %r' % (orig_mode,))
|
||||||
|
|
||||||
|
self._fobj = fobj
|
||||||
|
|
||||||
|
# This attribute is documented as available for non-blocking reads.
|
||||||
|
self.fileio = GreenFileDescriptorIO(fileno, mode, closefd=close)
|
||||||
|
|
||||||
|
self._orig_bufsize = bufsize
|
||||||
|
if bufsize < 0 or bufsize == 1:
|
||||||
|
bufsize = self.default_bufsize
|
||||||
|
elif bufsize == 0:
|
||||||
|
bufsize = 1
|
||||||
|
|
||||||
|
if mode == 'r':
|
||||||
|
IOFamily = BufferedReader
|
||||||
|
else:
|
||||||
|
assert mode == 'w'
|
||||||
|
IOFamily = BufferedWriter
|
||||||
|
if self._orig_bufsize == 0:
|
||||||
|
# We could also simply pass self.fileio as *io*, but this way
|
||||||
|
# we at least consistently expose a BufferedWriter in our *io*
|
||||||
|
# attribute.
|
||||||
|
IOFamily = FlushingBufferedWriter
|
||||||
|
|
||||||
|
super(FileObjectPosix, self).__init__(IOFamily(self.fileio, bufsize), close)
|
||||||
|
|
||||||
|
def _do_close(self, fobj, closefd):
|
||||||
|
try:
|
||||||
|
fobj.close()
|
||||||
|
# self.fileio already knows whether or not to close the
|
||||||
|
# file descriptor
|
||||||
|
self.fileio.close()
|
||||||
|
finally:
|
||||||
|
self._fobj = None
|
||||||
|
self.fileio = None
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
return self._io
|
||||||
23
python/gevent/_semaphore.pxd
Normal file
23
python/gevent/_semaphore.pxd
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
cdef class Semaphore:
|
||||||
|
cdef public int counter
|
||||||
|
cdef readonly object _links
|
||||||
|
cdef readonly object _notifier
|
||||||
|
cdef public int _dirty
|
||||||
|
cdef object __weakref__
|
||||||
|
|
||||||
|
cpdef bint locked(self)
|
||||||
|
cpdef int release(self) except -1000
|
||||||
|
cpdef rawlink(self, object callback)
|
||||||
|
cpdef unlink(self, object callback)
|
||||||
|
cpdef _start_notify(self)
|
||||||
|
cpdef _notify_links(self)
|
||||||
|
cdef _do_wait(self, object timeout)
|
||||||
|
cpdef int wait(self, object timeout=*) except -1000
|
||||||
|
cpdef bint acquire(self, int blocking=*, object timeout=*) except -1000
|
||||||
|
cpdef __enter__(self)
|
||||||
|
cpdef __exit__(self, object t, object v, object tb)
|
||||||
|
|
||||||
|
cdef class BoundedSemaphore(Semaphore):
|
||||||
|
cdef readonly int _initial_value
|
||||||
|
|
||||||
|
cpdef int release(self) except -1000
|
||||||
269
python/gevent/_semaphore.py
Normal file
269
python/gevent/_semaphore.py
Normal file
@@ -0,0 +1,269 @@
|
|||||||
|
import sys
|
||||||
|
from gevent.hub import get_hub, getcurrent
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['Semaphore', 'BoundedSemaphore']
|
||||||
|
|
||||||
|
|
||||||
|
class Semaphore(object):
|
||||||
|
"""
|
||||||
|
Semaphore(value=1) -> Semaphore
|
||||||
|
|
||||||
|
A semaphore manages a counter representing the number of release()
|
||||||
|
calls minus the number of acquire() calls, plus an initial value.
|
||||||
|
The acquire() method blocks if necessary until it can return
|
||||||
|
without making the counter negative.
|
||||||
|
|
||||||
|
If not given, ``value`` defaults to 1.
|
||||||
|
|
||||||
|
The semaphore is a context manager and can be used in ``with`` statements.
|
||||||
|
|
||||||
|
This Semaphore's ``__exit__`` method does not call the trace function
|
||||||
|
on CPython, but does under PyPy.
|
||||||
|
|
||||||
|
.. seealso:: :class:`BoundedSemaphore` for a safer version that prevents
|
||||||
|
some classes of bugs.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, value=1):
|
||||||
|
if value < 0:
|
||||||
|
raise ValueError("semaphore initial value must be >= 0")
|
||||||
|
self.counter = value
|
||||||
|
self._dirty = False
|
||||||
|
# In PyPy 2.6.1 with Cython 0.23, `cdef public` or `cdef
|
||||||
|
# readonly` or simply `cdef` attributes of type `object` can appear to leak if
|
||||||
|
# a Python subclass is used (this is visible simply
|
||||||
|
# instantiating this subclass if _links=[]). Our _links and
|
||||||
|
# _notifier are such attributes, and gevent.thread subclasses
|
||||||
|
# this class. Thus, we carefully manage the lifetime of the
|
||||||
|
# objects we put in these attributes so that, in the normal
|
||||||
|
# case of a semaphore used correctly (deallocated when it's not
|
||||||
|
# locked and no one is waiting), the leak goes away (because
|
||||||
|
# these objects are back to None). This can also be solved on PyPy
|
||||||
|
# by simply not declaring these objects in the pxd file, but that doesn't work for
|
||||||
|
# CPython ("No attribute...")
|
||||||
|
# See https://github.com/gevent/gevent/issues/660
|
||||||
|
self._links = None
|
||||||
|
self._notifier = None
|
||||||
|
# we don't want to do get_hub() here to allow defining module-level locks
|
||||||
|
# without initializing the hub
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
params = (self.__class__.__name__, self.counter, len(self._links) if self._links else 0)
|
||||||
|
return '<%s counter=%s _links[%s]>' % params
|
||||||
|
|
||||||
|
def locked(self):
|
||||||
|
"""Return a boolean indicating whether the semaphore can be acquired.
|
||||||
|
Most useful with binary semaphores."""
|
||||||
|
return self.counter <= 0
|
||||||
|
|
||||||
|
def release(self):
|
||||||
|
"""
|
||||||
|
Release the semaphore, notifying any waiters if needed.
|
||||||
|
"""
|
||||||
|
self.counter += 1
|
||||||
|
self._start_notify()
|
||||||
|
return self.counter
|
||||||
|
|
||||||
|
def _start_notify(self):
|
||||||
|
if self._links and self.counter > 0 and not self._notifier:
|
||||||
|
# We create a new self._notifier each time through the loop,
|
||||||
|
# if needed. (it has a __bool__ method that tells whether it has
|
||||||
|
# been run; once it's run once---at the end of the loop---it becomes
|
||||||
|
# false.)
|
||||||
|
# NOTE: Passing the bound method will cause a memory leak on PyPy
|
||||||
|
# with Cython <= 0.23.3. You must use >= 0.23.4.
|
||||||
|
# See https://bitbucket.org/pypy/pypy/issues/2149/memory-leak-for-python-subclass-of-cpyext#comment-22371546
|
||||||
|
self._notifier = get_hub().loop.run_callback(self._notify_links)
|
||||||
|
|
||||||
|
def _notify_links(self):
|
||||||
|
# Subclasses CANNOT override. This is a cdef method.
|
||||||
|
|
||||||
|
# We release self._notifier here. We are called by it
|
||||||
|
# at the end of the loop, and it is now false in a boolean way (as soon
|
||||||
|
# as this method returns).
|
||||||
|
# If we get acquired/released again, we will create a new one, but there's
|
||||||
|
# no need to keep it around until that point (making it potentially climb
|
||||||
|
# into older GC generations, notably on PyPy)
|
||||||
|
notifier = self._notifier
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
self._dirty = False
|
||||||
|
if not self._links:
|
||||||
|
# In case we were manually unlinked before
|
||||||
|
# the callback. Which shouldn't happen
|
||||||
|
return
|
||||||
|
for link in self._links:
|
||||||
|
if self.counter <= 0:
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
link(self) # Must use Cython >= 0.23.4 on PyPy else this leaks memory
|
||||||
|
except: # pylint:disable=bare-except
|
||||||
|
getcurrent().handle_error((link, self), *sys.exc_info())
|
||||||
|
if self._dirty:
|
||||||
|
# We mutated self._links so we need to start over
|
||||||
|
break
|
||||||
|
if not self._dirty:
|
||||||
|
return
|
||||||
|
finally:
|
||||||
|
# We should not have created a new notifier even if callbacks
|
||||||
|
# released us because we loop through *all* of our links on the
|
||||||
|
# same callback while self._notifier is still true.
|
||||||
|
assert self._notifier is notifier
|
||||||
|
self._notifier = None
|
||||||
|
|
||||||
|
def rawlink(self, callback):
    """
    rawlink(callback) -> None

    Register *callback* to be invoked once the counter is greater than zero.

    The callback runs in the :class:`Hub <gevent.hub.Hub>` greenlet and so
    must not use any blocking gevent API. It receives a single argument:
    this semaphore instance.

    Normally invoked for you by :meth:`acquire` and :meth:`wait`; most code
    never calls it directly.

    :raises TypeError: if *callback* is not callable.
    """
    if not callable(callback):
        raise TypeError('Expected callable:', callback)
    existing = self._links
    if existing is None:
        # First subscriber: lazily create the list.
        self._links = [callback]
    else:
        existing.append(callback)
    # Tell any in-progress _notify_links() pass that the list changed.
    self._dirty = True
|
||||||
|
|
||||||
|
def unlink(self, callback):
    """
    unlink(callback) -> None

    Remove a callback previously registered with :meth:`rawlink`.

    Missing callbacks (or an empty link list) are ignored silently.
    Normally invoked for you by :meth:`acquire` and :meth:`wait`; most
    code never calls it directly.
    """
    try:
        self._links.remove(callback)
    except (ValueError, AttributeError):
        # Either the callback was never linked (ValueError) or there is
        # no link list at all (_links is None -> AttributeError).
        pass
    else:
        # Flag the mutation for any in-progress _notify_links() pass.
        self._dirty = True
    if not self._links:
        self._links = None
        # TODO: Cancel a notifier if there are no links?
|
||||||
|
|
||||||
|
def _do_wait(self, timeout):
    """
    Wait for up to *timeout* seconds to expire. If timeout
    elapses, return the exception. Otherwise, return None.
    Raises timeout if a different timer expires.
    """
    # Arrange for release() to wake this greenlet: our switch method is
    # registered as a link, so _notify_links() switches back into us.
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        # A dummy (no-op) timer is used when timeout is None.
        timer = Timeout._start_new_or_dummy(timeout)
        try:
            try:
                result = get_hub().switch()
                # Only release() (via _notify_links) should switch us back in;
                # anything else indicates a misuse of this greenlet.
                assert result is self, 'Invalid switch into Semaphore.wait/acquire(): %r' % (result, )
            except Timeout as ex:
                if ex is not timer:
                    # Someone else's timer fired: propagate it.
                    raise
                # Our own timer expired: signal that by returning it.
                return ex
            finally:
                timer.cancel()
        finally:
            self.unlink(switch)
|
||||||
|
|
||||||
|
def wait(self, timeout=None):
    """
    wait(timeout=None) -> int

    Wait until it is possible to acquire this semaphore, or until the optional
    *timeout* elapses.

    .. caution:: If this semaphore was initialized with a size of 0,
       this method will block forever if no timeout is given.

    :keyword float timeout: If given, specifies the maximum amount of seconds
       this method will block.
    :return: A number indicating how many times the semaphore can be acquired
        before blocking.
    """
    if self.counter > 0:
        # Already acquirable; no need to block.
        return self.counter

    self._do_wait(timeout) # return value irrelevant, whether we got it or got a timeout
    # NOTE: on timeout this may still return 0 (the caller distinguishes
    # by the counter value, not by an exception).
    return self.counter
|
||||||
|
|
||||||
|
def acquire(self, blocking=True, timeout=None):
    """
    acquire(blocking=True, timeout=None) -> bool

    Acquire the semaphore.

    .. caution:: If this semaphore was initialized with a size of 0,
       this method will block forever (unless a timeout is given or blocking is
       set to false).

    :keyword bool blocking: If True (the default), this function will block
       until the semaphore is acquired.
    :keyword float timeout: If given, specifies the maximum amount of seconds
       this method will block.
    :return: A boolean indicating whether the semaphore was acquired.
       If ``blocking`` is True and ``timeout`` is None (the default), then
       (so long as this semaphore was initialized with a size greater than 0)
       this will always return True. If a timeout was given, and it expired before
       the semaphore was acquired, False will be returned. (Note that this can still
       raise a ``Timeout`` exception, if some other caller had already started a timer.)
    """
    if self.counter > 0:
        # Fast path: no contention, take it immediately.
        self.counter -= 1
        return True

    if not blocking:
        return False

    # _do_wait returns the Timeout instance if *our* timer expired,
    # None if we were woken by a release().
    timeout = self._do_wait(timeout)
    if timeout is not None:
        # Our timer expired.
        return False

    # Neither our timer no another one expired, so we blocked until
    # awoke. Therefore, the counter is ours
    self.counter -= 1
    assert self.counter >= 0
    return True

_py3k_acquire = acquire # PyPy needs this; it must be static for Cython
|
||||||
|
|
||||||
|
def __enter__(self):
    # Context-manager entry: acquire (blocking).
    # NOTE: returns None, not self, so ``with sem as x`` binds x to None.
    self.acquire()

def __exit__(self, t, v, tb):
    # Context-manager exit: always release, even on exception
    # (exceptions are not suppressed).
    self.release()
|
||||||
|
|
||||||
|
|
||||||
|
class BoundedSemaphore(Semaphore):
    """
    BoundedSemaphore(value=1) -> BoundedSemaphore

    A semaphore whose value is never allowed to rise above its initial
    value. Releasing past that point raises :class:`ValueError`.

    Semaphores usually guard a resource of fixed capacity, so releasing
    more often than acquiring almost always indicates a bug — this class
    turns that bug into an immediate, loud error.

    If not given, *value* defaults to 1.
    """

    #: For monkey-patching, allow changing the class of error we raise
    _OVER_RELEASE_ERROR = ValueError

    def __init__(self, *args, **kwargs):
        Semaphore.__init__(self, *args, **kwargs)
        # Remember the starting counter so release() can enforce the bound.
        self._initial_value = self.counter

    def release(self):
        # Guard clause: only delegate when we are still below the bound.
        if self.counter < self._initial_value:
            return Semaphore.release(self)
        raise self._OVER_RELEASE_ERROR("Semaphore released too many times")
|
||||||
539
python/gevent/_socket2.py
Normal file
539
python/gevent/_socket2.py
Normal file
@@ -0,0 +1,539 @@
|
|||||||
|
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Python 2 socket module.
|
||||||
|
"""
|
||||||
|
# Our import magic sadly makes this warning useless
|
||||||
|
# pylint: disable=undefined-variable
|
||||||
|
|
||||||
|
import time
|
||||||
|
from gevent import _socketcommon
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
|
||||||
|
copy_globals(_socketcommon, globals(),
|
||||||
|
names_to_ignore=_socketcommon.__py3_imports__ + _socketcommon.__extensions__,
|
||||||
|
dunder_names_to_keep=())
|
||||||
|
|
||||||
|
__socket__ = _socketcommon.__socket__
|
||||||
|
__implements__ = _socketcommon._implements
|
||||||
|
__extensions__ = _socketcommon.__extensions__
|
||||||
|
__imports__ = [i for i in _socketcommon.__imports__ if i not in _socketcommon.__py3_imports__]
|
||||||
|
__dns__ = _socketcommon.__dns__
|
||||||
|
# Obtain the Python 2 socket internals; fall back gracefully on Python 3
# (where these private attributes do not exist) so the module can still be
# imported for documentation builds.
try:
    _fileobject = __socket__._fileobject
    _socketmethods = __socket__._socketmethods
except AttributeError:
    # Allow this module to be imported under Python 3
    # for building the docs
    _fileobject = object
    _socketmethods = ('bind', 'connect', 'connect_ex',
                      'fileno', 'listen', 'getpeername',
                      'getsockname', 'getsockopt',
                      'setsockopt', 'sendall',
                      'setblocking', 'settimeout',
                      'gettimeout', 'shutdown')
else:
    # Python 2 doesn't natively support with statements on _fileobject;
    # but it eases our test cases if we can do the same with on both Py3
    # and Py2. Implementation copied from Python 3
    if not hasattr(_fileobject, '__enter__'):
        # we could either patch in place:
        #_fileobject.__enter__ = lambda self: self
        #_fileobject.__exit__ = lambda self, *args: self.close() if not self.closed else None
        # or we could subclass. subclassing has the benefit of not
        # changing the behaviour of the stdlib if we're just imported; OTOH,
        # under Python 2.6/2.7, test_urllib2net.py asserts that the class IS
        # socket._fileobject (sigh), so we have to work around that.
        class _fileobject(_fileobject): # pylint:disable=function-redefined

            def __enter__(self):
                return self

            def __exit__(self, *args):
                # Close on exit unless already closed; exceptions propagate.
                if not self.closed:
                    self.close()
|
||||||
|
|
||||||
|
def _get_memory(data):
    """
    Return a zero-copy buffer over *data* for use with socket sends.

    Prefers a sliceable :class:`memoryview`; shapeless views (e.g. some
    ctypes objects) are flattened to bytes, and objects that reject
    ``memoryview`` entirely (Python 2 ``array.array``) fall back to the
    Py2 ``buffer`` builtin.
    """
    try:
        view = memoryview(data)
    except TypeError:
        # fixes "python2.7 array.array doesn't support memoryview used in
        # gevent.socket.send" issue
        # (http://code.google.com/p/gevent/issues/detail?id=94)
        return buffer(data)
    if view.shape:
        return view
    # No shape, probably working with a ctypes object,
    # or something else exotic that supports the buffer interface
    return view.tobytes()
|
||||||
|
|
||||||
|
|
||||||
|
class _closedsocket(object):
    """Stand-in installed as ``self._sock`` after close(); every socket
    operation raises EBADF instead of touching a dead descriptor."""
    __slots__ = []

    def _dummy(*args, **kwargs): # pylint:disable=no-method-argument,unused-argument
        # ``error``/``EBADF`` come from this module's globals (copied from
        # the socket/errno modules above).
        raise error(EBADF, 'Bad file descriptor')
    # All _delegate_methods must also be initialized here.
    send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy

    if PYPY:
        # PyPy's socket reference counting hooks: a closed socket has
        # nothing to track, so these are no-ops.

        def _drop(self):
            pass

        def _reuse(self):
            pass

    # Any other attribute access also fails with EBADF.
    __getattr__ = _dummy
|
||||||
|
|
||||||
|
|
||||||
|
# Sentinel for socket.send(): distinguishes "no timeout argument passed"
# from an explicit ``timeout=None`` (block forever).
timeout_default = object()
|
||||||
|
|
||||||
|
|
||||||
|
class socket(object):
    """
    gevent `socket.socket <https://docs.python.org/2/library/socket.html#socket-objects>`_
    for Python 2.

    This object should have the same API as the standard library socket linked to above. Not all
    methods are specifically documented here; when they are they may point out a difference
    to be aware of or may document a method the standard library does not.
    """

    # pylint:disable=too-many-public-methods

    def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
        # *_sock* may be a raw _socket.socket or another wrapper exposing
        # ``_sock``; in the latter case we adopt its timeout too.
        if _sock is None:
            self._sock = _realsocket(family, type, proto)
            self.timeout = _socket.getdefaulttimeout()
        else:
            if hasattr(_sock, '_sock'):
                self._sock = _sock._sock
                # False is used as a "no timeout attribute" marker because
                # None is a valid timeout value.
                self.timeout = getattr(_sock, 'timeout', False)
                if self.timeout is False:
                    self.timeout = _socket.getdefaulttimeout()
            else:
                self._sock = _sock
                self.timeout = _socket.getdefaulttimeout()
            if PYPY:
                self._sock._reuse()
        # The underlying fd is always non-blocking; gevent simulates
        # blocking by waiting on libev IO watchers instead.
        self._sock.setblocking(0)
        fileno = self._sock.fileno()
        self.hub = get_hub()
        io = self.hub.loop.io
        self._read_event = io(fileno, 1)
        self._write_event = io(fileno, 2)

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._formatinfo())

    def _formatinfo(self):
        # Best-effort human-readable summary for __repr__/__str__;
        # never raises even on a broken/closed socket.
        # pylint:disable=broad-except
        try:
            fileno = self.fileno()
        except Exception as ex:
            fileno = str(ex)
        try:
            sockname = self.getsockname()
            sockname = '%s:%s' % sockname
        except Exception:
            sockname = None
        try:
            peername = self.getpeername()
            peername = '%s:%s' % peername
        except Exception:
            peername = None
        result = 'fileno=%s' % fileno
        if sockname is not None:
            result += ' sock=' + str(sockname)
        if peername is not None:
            result += ' peer=' + str(peername)
        if getattr(self, 'timeout', None) is not None:
            result += ' timeout=' + str(self.timeout)
        return result

    def _get_ref(self):
        # The socket keeps the loop alive if either watcher does.
        return self._read_event.ref or self._write_event.ref

    def _set_ref(self, value):
        self._read_event.ref = value
        self._write_event.ref = value

    ref = property(_get_ref, _set_ref)

    def _wait(self, watcher, timeout_exc=timeout('timed out')):
        """Block the current greenlet until *watcher* has pending events.

        If *timeout* is non-negative, then *timeout_exc* is raised after *timeout* second has passed.
        By default *timeout_exc* is ``socket.timeout('timed out')``.

        If :func:`cancel_wait` is called, raise ``socket.error(EBADF, 'File descriptor was closed in another greenlet')``.
        """
        # NOTE: the default timeout_exc is a single shared instance,
        # evaluated once at class creation time.
        if watcher.callback is not None:
            raise _socketcommon.ConcurrentObjectUseError('This socket is already used by another greenlet: %r' % (watcher.callback, ))
        if self.timeout is not None:
            timeout = Timeout.start_new(self.timeout, timeout_exc, ref=False)
        else:
            timeout = None
        try:
            self.hub.wait(watcher)
        finally:
            if timeout is not None:
                timeout.cancel()

    def accept(self):
        sock = self._sock
        while True:
            try:
                client_socket, address = sock.accept()
                break
            except error as ex:
                # EWOULDBLOCK on a non-zero-timeout socket means "wait and
                # retry"; anything else propagates.
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                sys.exc_clear()
            self._wait(self._read_event)
        sockobj = socket(_sock=client_socket)
        if PYPY:
            client_socket._drop()
        return sockobj, address

    def close(self, _closedsocket=_closedsocket, cancel_wait_ex=cancel_wait_ex):
        # This function should not reference any globals. See Python issue #808164.
        self.hub.cancel_wait(self._read_event, cancel_wait_ex)
        self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        s = self._sock
        # Swap in the EBADF-raising stub before dropping the real socket.
        self._sock = _closedsocket()
        if PYPY:
            s._drop()

    @property
    def closed(self):
        return isinstance(self._sock, _closedsocket)

    def connect(self, address):
        # Non-blocking (timeout == 0.0): delegate directly, caller handles
        # EWOULDBLOCK/EINPROGRESS itself.
        if self.timeout == 0.0:
            return self._sock.connect(address)
        sock = self._sock
        if isinstance(address, tuple):
            # Resolve the host cooperatively before connecting.
            r = getaddrinfo(address[0], address[1], sock.family)
            address = r[0][-1]
        if self.timeout is not None:
            timer = Timeout.start_new(self.timeout, timeout('timed out'))
        else:
            timer = None
        try:
            while True:
                # Check for an asynchronous error from a previous attempt.
                err = sock.getsockopt(SOL_SOCKET, SO_ERROR)
                if err:
                    raise error(err, strerror(err))
                result = sock.connect_ex(address)
                if not result or result == EISCONN:
                    break
                elif (result in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or (result == EINVAL and is_windows):
                    # Connection in progress: wait for writability and retry.
                    self._wait(self._write_event)
                else:
                    raise error(result, strerror(result))
        finally:
            if timer is not None:
                timer.cancel()

    def connect_ex(self, address):
        try:
            return self.connect(address) or 0
        except timeout:
            return EAGAIN
        except error as ex:
            if type(ex) is error: # pylint:disable=unidiomatic-typecheck
                return ex.args[0]
            else:
                raise # gaierror is not silenced by connect_ex

    def dup(self):
        """dup() -> socket object

        Return a new socket object connected to the same system resource.
        Note, that the new socket does not inherit the timeout."""
        return socket(_sock=self._sock)

    def makefile(self, mode='r', bufsize=-1):
        # Two things to look out for:
        # 1) Closing the original socket object should not close the
        # socket (hence creating a new instance)
        # 2) The resulting fileobject must keep the timeout in order
        # to be compatible with the stdlib's socket.makefile.
        # Pass self as _sock to preserve timeout.
        fobj = _fileobject(type(self)(_sock=self), mode, bufsize)
        if PYPY:
            self._sock._drop()
        return fobj

    def recv(self, *args):
        sock = self._sock # keeping the reference so that fd is not closed during waiting
        while True:
            try:
                return sock.recv(*args)
            except error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                # QQQ without clearing exc_info test__refcount.test_clean_exit fails
                sys.exc_clear()
            self._wait(self._read_event)

    def recvfrom(self, *args):
        sock = self._sock
        while True:
            try:
                return sock.recvfrom(*args)
            except error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                sys.exc_clear()
            self._wait(self._read_event)

    def recvfrom_into(self, *args):
        sock = self._sock
        while True:
            try:
                return sock.recvfrom_into(*args)
            except error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                sys.exc_clear()
            self._wait(self._read_event)

    def recv_into(self, *args):
        sock = self._sock
        while True:
            try:
                return sock.recv_into(*args)
            except error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                sys.exc_clear()
            self._wait(self._read_event)

    def send(self, data, flags=0, timeout=timeout_default):
        # *timeout* defaults to the sentinel so that an explicit None
        # (block forever) can be distinguished from "use self.timeout".
        sock = self._sock
        if timeout is timeout_default:
            timeout = self.timeout
        try:
            return sock.send(data, flags)
        except error as ex:
            if ex.args[0] != EWOULDBLOCK or timeout == 0.0:
                raise
            sys.exc_clear()
            self._wait(self._write_event)
            try:
                return sock.send(data, flags)
            except error as ex2:
                # Still not writable after the wait: report 0 bytes sent
                # rather than raising, matching a partial-send contract.
                if ex2.args[0] == EWOULDBLOCK:
                    return 0
                raise

    def __send_chunk(self, data_memory, flags, timeleft, end):
        """
        Send the complete contents of ``data_memory`` before returning.
        This is the core loop around :meth:`send`.

        :param timeleft: Either ``None`` if there is no timeout involved,
           or a float indicating the timeout to use.
        :param end: Either ``None`` if there is no timeout involved, or
           a float giving the absolute end time.
        :return: An updated value for ``timeleft`` (or None)
        :raises timeout: If ``timeleft`` was given and elapsed while
           sending this chunk.
        """
        data_sent = 0
        len_data_memory = len(data_memory)
        started_timer = 0
        while data_sent < len_data_memory:
            chunk = data_memory[data_sent:]
            if timeleft is None:
                data_sent += self.send(chunk, flags)
            elif started_timer and timeleft <= 0:
                # Check before sending to guarantee a check
                # happens even if each chunk successfully sends its data
                # (especially important for SSL sockets since they have large
                # buffers). But only do this if we've actually tried to
                # send something once to avoid spurious timeouts on non-blocking
                # sockets.
                raise timeout('timed out')
            else:
                started_timer = 1
                data_sent += self.send(chunk, flags, timeout=timeleft)
                timeleft = end - time.time()

        return timeleft

    def sendall(self, data, flags=0):
        if isinstance(data, unicode):
            data = data.encode()
        # this sendall is also reused by gevent.ssl.SSLSocket subclass,
        # so it should not call self._sock methods directly
        data_memory = _get_memory(data)
        len_data_memory = len(data_memory)
        if not len_data_memory:
            # Don't send empty data, can cause SSL EOFError.
            # See issue 719
            return 0

        # On PyPy up through 2.6.0, subviews of a memoryview() object
        # copy the underlying bytes the first time the builtin
        # socket.send() method is called. On a non-blocking socket
        # (that thus calls socket.send() many times) with a large
        # input, this results in many repeated copies of an ever
        # smaller string, depending on the networking buffering. For
        # example, if each send() can process 1MB of a 50MB input, and
        # we naively pass the entire remaining subview each time, we'd
        # copy 49MB, 48MB, 47MB, etc, thus completely killing
        # performance. To workaround this problem, we work in
        # reasonable, fixed-size chunks. This results in a 10x
        # improvement to bench_sendall.py, while having no measurable impact on
        # CPython (since it doesn't copy at all the only extra overhead is
        # a few python function calls, which is negligible for large inputs).

        # See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent

        # Too small of a chunk (the socket's buf size is usually too
        # small) results in reduced perf due to *too many* calls to send and too many
        # small copies. With a buffer of 143K (the default on my system), for
        # example, bench_sendall.py yields ~264MB/s, while using 1MB yields
        # ~653MB/s (matching CPython). 1MB is arbitrary and might be better
        # chosen, say, to match a page size?
        chunk_size = max(self.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024) # pylint:disable=no-member

        data_sent = 0
        end = None
        timeleft = None
        if self.timeout is not None:
            timeleft = self.timeout
            end = time.time() + timeleft

        while data_sent < len_data_memory:
            chunk_end = min(data_sent + chunk_size, len_data_memory)
            chunk = data_memory[data_sent:chunk_end]

            timeleft = self.__send_chunk(chunk, flags, timeleft, end)
            data_sent += len(chunk) # Guaranteed it sent the whole thing

    def sendto(self, *args):
        sock = self._sock
        try:
            return sock.sendto(*args)
        except error as ex:
            if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                raise
            sys.exc_clear()
            self._wait(self._write_event)
            try:
                return sock.sendto(*args)
            except error as ex2:
                if ex2.args[0] == EWOULDBLOCK:
                    return 0
                raise

    def setblocking(self, flag):
        # Blocking is emulated purely via self.timeout; the underlying fd
        # stays non-blocking (see __init__).
        if flag:
            self.timeout = None
        else:
            self.timeout = 0.0

    def settimeout(self, howlong):
        if howlong is not None:
            try:
                f = howlong.__float__
            except AttributeError:
                raise TypeError('a float is required')
            howlong = f()
            if howlong < 0.0:
                raise ValueError('Timeout value out of range')
        self.__dict__['timeout'] = howlong # avoid recursion with any property on self.timeout

    def gettimeout(self):
        return self.__dict__['timeout'] # avoid recursion with any property on self.timeout

    def shutdown(self, how):
        # Cancel any greenlets blocked on the side(s) being shut down
        # before delegating to the real socket.
        if how == 0: # SHUT_RD
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
        elif how == 1: # SHUT_WR
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        else:
            self.hub.cancel_wait(self._read_event, cancel_wait_ex)
            self.hub.cancel_wait(self._write_event, cancel_wait_ex)
        self._sock.shutdown(how)

    family = property(lambda self: self._sock.family)
    type = property(lambda self: self._sock.type)
    proto = property(lambda self: self._sock.proto)

    def fileno(self):
        return self._sock.fileno()

    def getsockname(self):
        return self._sock.getsockname()

    def getpeername(self):
        return self._sock.getpeername()

    # delegate the functions that we haven't implemented to the real socket object

    _s = "def %s(self, *args): return self._sock.%s(*args)\n\n"
    _m = None
    for _m in set(_socketmethods) - set(locals()):
        exec(_s % (_m, _m,))
    del _m, _s

    if PYPY:
        # PyPy reference-count helpers are forwarded to the real socket.

        def _reuse(self):
            self._sock._reuse()

        def _drop(self):
            self._sock._drop()
|
||||||
|
|
||||||
|
|
||||||
|
# Alias matching the stdlib: ``socket.SocketType`` is the socket class.
SocketType = socket
|
||||||
|
|
||||||
|
# Only provide socketpair() where the platform does (not on Windows/Py2);
# otherwise drop it from the implemented-names list so monkey-patching
# does not advertise it.
if hasattr(_socket, 'socketpair'):

    def socketpair(family=getattr(_socket, 'AF_UNIX', _socket.AF_INET),
                   type=_socket.SOCK_STREAM, proto=0):
        # Wrap both raw sockets in gevent sockets.
        one, two = _socket.socketpair(family, type, proto)
        result = socket(_sock=one), socket(_sock=two)
        if PYPY:
            # The wrappers took over the references; drop ours.
            one._drop()
            two._drop()
        return result
elif 'socketpair' in __implements__:
    __implements__.remove('socketpair')
|
||||||
|
|
||||||
|
# Only provide fromfd() where the platform does; otherwise remove it
# from the implemented-names list.
if hasattr(_socket, 'fromfd'):

    def fromfd(fd, family, type, proto=0):
        # Duplicate the fd into a raw socket, then wrap it.
        s = _socket.fromfd(fd, family, type, proto)
        result = socket(_sock=s)
        if PYPY:
            s._drop()
        return result

elif 'fromfd' in __implements__:
    __implements__.remove('fromfd')
|
||||||
|
|
||||||
|
# Provide the legacy socket.ssl() helper only if the stdlib still has it.
if hasattr(__socket__, 'ssl'):

    def ssl(sock, keyfile=None, certfile=None):
        # deprecated in 2.7.9 but still present;
        # sometimes backported by distros. See ssl.py
        # Note that we import gevent.ssl, not _ssl2, to get the correct
        # version.
        from gevent import ssl as _sslmod
        # wrap_socket is 2.7.9/backport, sslwrap_simple is older. They take
        # the same arguments.
        wrap = getattr(_sslmod, 'wrap_socket', None) or getattr(_sslmod, 'sslwrap_simple')
        return wrap(sock, keyfile, certfile)
    __implements__.append('ssl')
|
||||||
|
|
||||||
|
# Public API: gevent re-implementations, gevent extensions, and stdlib re-exports.
__all__ = __implements__ + __extensions__ + __imports__
|
||||||
1065
python/gevent/_socket3.py
Normal file
1065
python/gevent/_socket3.py
Normal file
File diff suppressed because it is too large
Load Diff
343
python/gevent/_socketcommon.py
Normal file
343
python/gevent/_socketcommon.py
Normal file
@@ -0,0 +1,343 @@
|
|||||||
|
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
# Name lists consumed by _socket2/_socket3 (and by monkey-patching) to decide
# which attributes are gevent re-implementations vs. plain re-exports.

# standard functions and classes that this module re-implements in a gevent-aware way:
_implements = [
    'create_connection',
    'socket',
    'SocketType',
    'fromfd',
    'socketpair',
]

# DNS functions are re-implemented too (they must not block the hub).
__dns__ = [
    'getaddrinfo',
    'gethostbyname',
    'gethostbyname_ex',
    'gethostbyaddr',
    'getnameinfo',
    'getfqdn',
]

_implements += __dns__

# non-standard functions that this module provides:
__extensions__ = [
    'cancel_wait',
    'wait_read',
    'wait_write',
    'wait_readwrite',
]

# standard functions and classes that this module re-imports
__imports__ = [
    'error',
    'gaierror',
    'herror',
    'htonl',
    'htons',
    'ntohl',
    'ntohs',
    'inet_aton',
    'inet_ntoa',
    'inet_pton',
    'inet_ntop',
    'timeout',
    'gethostname',
    'getprotobyname',
    'getservbyname',
    'getservbyport',
    'getdefaulttimeout',
    'setdefaulttimeout',
    # Windows:
    'errorTab',
]

# Names that only exist on Python 3; _socket2 filters these out of its
# own __imports__.
__py3_imports__ = [
    # Python 3
    'AddressFamily',
    'SocketKind',
    'CMSG_LEN',
    'CMSG_SPACE',
    'dup',
    'if_indextoname',
    'if_nameindex',
    'if_nametoindex',
    'sethostname',
]

__imports__.extend(__py3_imports__)
|
||||||
|
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent.hub import ConcurrentObjectUseError
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
from gevent._compat import string_types, integer_types, PY3
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
from gevent._util import _NONE
|
||||||
|
|
||||||
|
is_windows = sys.platform == 'win32'
|
||||||
|
# pylint:disable=no-name-in-module,unused-import
|
||||||
|
if is_windows:
|
||||||
|
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
|
||||||
|
from errno import WSAEINVAL as EINVAL
|
||||||
|
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
|
||||||
|
from errno import WSAEINPROGRESS as EINPROGRESS
|
||||||
|
from errno import WSAEALREADY as EALREADY
|
||||||
|
from errno import WSAEISCONN as EISCONN
|
||||||
|
from gevent.win32util import formatError as strerror
|
||||||
|
EAGAIN = EWOULDBLOCK
|
||||||
|
else:
|
||||||
|
from errno import EINVAL
|
||||||
|
from errno import EWOULDBLOCK
|
||||||
|
from errno import EINPROGRESS
|
||||||
|
from errno import EALREADY
|
||||||
|
from errno import EAGAIN
|
||||||
|
from errno import EISCONN
|
||||||
|
from os import strerror
|
||||||
|
|
||||||
|
try:
|
||||||
|
from errno import EBADF
|
||||||
|
except ImportError:
|
||||||
|
EBADF = 9
|
||||||
|
|
||||||
|
import _socket
|
||||||
|
_realsocket = _socket.socket
|
||||||
|
import socket as __socket__
|
||||||
|
|
||||||
|
# Pull the re-exported names from the stdlib socket module into this
# module's namespace; names missing on this platform/version are skipped.
_name = _value = None
__imports__ = copy_globals(__socket__, globals(),
                           only_names=__imports__,
                           ignore_missing_names=True)

# Also copy every simple constant (AF_*, SOCK_*, SO_*, etc.) the stdlib
# module exports.
for _name in __socket__.__all__:
    _value = getattr(__socket__, _name)
    if isinstance(_value, (integer_types, string_types)):
        globals()[_name] = _value
        __imports__.append(_name)

del _name, _value

# ``timeout`` was copied from the socket module above; keep a stable alias.
_timeout_error = timeout # pylint: disable=undefined-variable
|
||||||
|
|
||||||
|
|
||||||
|
def wait(io, timeout=None, timeout_exc=_NONE):
    """
    Block the current greenlet until *io* is ready.

    If *timeout* is non-negative, then *timeout_exc* is raised after
    *timeout* second has passed. By default *timeout_exc* is
    ``socket.timeout('timed out')``.

    If :func:`cancel_wait` is called on *io* by another greenlet,
    raise an exception in this blocking greenlet
    (``socket.error(EBADF, 'File descriptor was closed in another
    greenlet')`` by default).

    :param io: A libev watcher, most commonly an IO watcher obtained from
        :meth:`gevent.core.loop.io`
    :keyword timeout_exc: The exception to raise if the timeout expires.
        By default, a :class:`socket.timeout` exception is raised.
        If you pass a value for this keyword, it is interpreted as for
        :class:`gevent.timeout.Timeout`.
    """
    if io.callback is not None:
        raise ConcurrentObjectUseError('This socket is already used by another greenlet: %r' % (io.callback, ))

    # Arm a Timeout only when the caller asked for one; remember it in a
    # dedicated local so cleanup is unambiguous.
    timer = None
    if timeout is not None:
        if timeout_exc is _NONE:
            timeout_exc = _timeout_error('timed out')
        timer = Timeout.start_new(timeout, timeout_exc)

    try:
        return get_hub().wait(io)
    finally:
        if timer is not None:
            timer.cancel()
# rename "io" to "watcher" because wait() works with any watcher
|
||||||
|
|
||||||
|
|
||||||
|
def wait_read(fileno, timeout=None, timeout_exc=_NONE):
    """
    Block the current greenlet until *fileno* is ready to read.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    .. seealso:: :func:`cancel_wait`
    """
    # 1 == read event mask for the hub's IO watcher.
    watcher = get_hub().loop.io(fileno, 1)
    return wait(watcher, timeout, timeout_exc)
|
||||||
|
|
||||||
|
|
||||||
|
def wait_write(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
    """
    Block the current greenlet until *fileno* is ready to write.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    :keyword event: Ignored. Applications should not pass this parameter.
       In the future, it may become an error.

    .. seealso:: :func:`cancel_wait`
    """
    # pylint:disable=unused-argument
    # 2 == write event mask for the hub's IO watcher.
    watcher = get_hub().loop.io(fileno, 2)
    return wait(watcher, timeout, timeout_exc)
|
||||||
|
|
||||||
|
|
||||||
|
def wait_readwrite(fileno, timeout=None, timeout_exc=_NONE, event=_NONE):
    """
    Block the current greenlet until *fileno* is ready to read or
    write.

    For the meaning of the other parameters and possible exceptions,
    see :func:`wait`.

    :keyword event: Ignored. Applications should not pass this parameter.
       In the future, it may become an error.

    .. seealso:: :func:`cancel_wait`
    """
    # pylint:disable=unused-argument
    # 3 == read | write event mask for the hub's IO watcher.
    watcher = get_hub().loop.io(fileno, 3)
    return wait(watcher, timeout, timeout_exc)
|
||||||
|
|
||||||
|
#: The exception raised by default on a call to :func:`cancel_wait`
class cancel_wait_ex(error): # pylint: disable=undefined-variable
    # `error` is the stdlib socket.error copied in by copy_globals above,
    # which is why pylint can't resolve it. The exception always carries
    # EBADF plus a fixed explanatory message.
    def __init__(self):
        super(cancel_wait_ex, self).__init__(
            EBADF,
            'File descriptor was closed in another greenlet')
|
||||||
|
|
||||||
|
|
||||||
|
def cancel_wait(watcher, error=cancel_wait_ex):
    """See :meth:`gevent.hub.Hub.cancel_wait`"""
    # *error* may be an exception class (instantiated by the hub) or an
    # instance; by default the cancel_wait_ex class defined above.
    get_hub().cancel_wait(watcher, error)
|
||||||
|
|
||||||
|
|
||||||
|
class BlockingResolver(object):
    """A name resolver that simply delegates to the blocking
    functions of the low-level :mod:`_socket` module (no cooperation
    with the event loop)."""

    def __init__(self, hub=None):
        # The hub argument is accepted for interface compatibility with
        # the other resolver implementations; it is unused here.
        pass

    def close(self):
        # Nothing to release; present for resolver-interface compatibility.
        pass

    # Install each resolution function from _socket as a staticmethod on
    # this class. Assigning into locals() works here because we are inside
    # a class body, whose locals become the class namespace.
    for method in ['gethostbyname',
                   'gethostbyname_ex',
                   'getaddrinfo',
                   'gethostbyaddr',
                   'getnameinfo']:
        locals()[method] = staticmethod(getattr(_socket, method))
|
||||||
|
|
||||||
|
|
||||||
|
def gethostbyname(hostname):
    """
    gethostbyname(host) -> address

    Return the IP address (a string of the form '255.255.255.255') for a host.

    .. seealso:: :doc:`dns`
    """
    resolver = get_hub().resolver
    return resolver.gethostbyname(hostname)
|
||||||
|
|
||||||
|
|
||||||
|
def gethostbyname_ex(hostname):
    """
    gethostbyname_ex(host) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP addresses,
    for a host. The host argument is a string giving a host name or IP number.

    .. seealso:: :doc:`dns`
    """
    # Fix: the original docstring contained a stray sentence ("Resolve host
    # and port into list of address info entries.") copy-pasted from
    # getaddrinfo(); it described the wrong function and has been removed.
    return get_hub().resolver.gethostbyname_ex(hostname)
|
||||||
|
|
||||||
|
|
||||||
|
def getaddrinfo(host, port, family=0, socktype=0, proto=0, flags=0):
    """
    Resolve host and port into list of address info entries.

    Translate the host/port argument into a sequence of 5-tuples that contain
    all the necessary arguments for creating a socket connected to that service.
    host is a domain name, a string representation of an IPv4/v6 address or
    None. port is a string service name such as 'http', a numeric port number or
    None. By passing None as the value of host and port, you can pass NULL to
    the underlying C API.

    The family, type and proto arguments can be optionally specified in order to
    narrow the list of addresses returned. Passing zero as a value for each of
    these arguments selects the full range of results.

    .. seealso:: :doc:`dns`
    """
    resolver = get_hub().resolver
    return resolver.getaddrinfo(host, port, family, socktype, proto, flags)
|
||||||
|
|
||||||
|
if PY3:
    # The name of the socktype param changed to type in Python 3.
    # See https://github.com/gevent/gevent/issues/960
    # Using inspect here to directly detect the condition is painful because we have to
    # wrap it with a try/except TypeError because not all Python 2
    # versions can get the args of a builtin; we also have to use a with to suppress
    # the deprecation warning.
    # Preserve the docstring of the Python-2 definition above and reattach
    # it to the redefined function.
    d = getaddrinfo.__doc__

    def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): # pylint:disable=function-redefined
        return get_hub().resolver.getaddrinfo(host, port, family, type, proto, flags)
    getaddrinfo.__doc__ = d
    del d
|
||||||
|
|
||||||
|
|
||||||
|
def gethostbyaddr(ip_address):
    """
    gethostbyaddr(ip_address) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP addresses,
    for a host. The host argument is a string giving a host name or IP number.

    .. seealso:: :doc:`dns`
    """
    resolver = get_hub().resolver
    return resolver.gethostbyaddr(ip_address)
|
||||||
|
|
||||||
|
|
||||||
|
def getnameinfo(sockaddr, flags):
    """
    getnameinfo(sockaddr, flags) -> (host, port)

    Get host and port for a sockaddr.

    .. seealso:: :doc:`dns`
    """
    resolver = get_hub().resolver
    return resolver.getnameinfo(sockaddr, flags)
|
||||||
|
|
||||||
|
|
||||||
|
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases. In case no FQDN is available, hostname
    from gethostname() is returned.
    """
    # pylint: disable=undefined-variable
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, _ = gethostbyaddr(name)
    except error:
        # Resolution failed; fall through and return the name unchanged.
        pass
    else:
        # Prefer the canonical hostname, then any alias, picking the first
        # entry that contains a dot (i.e. looks fully qualified).
        aliases.insert(0, hostname)
        for name in aliases: # EWW! pylint:disable=redefined-argument-from-local
            if isinstance(name, bytes):
                if b'.' in name:
                    break
            elif '.' in name:
                break
        else:
            # No dotted candidate found: fall back to the canonical hostname
            # (for/else runs only when the loop did not break).
            name = hostname
    return name
|
||||||
436
python/gevent/_ssl2.py
Normal file
436
python/gevent/_ssl2.py
Normal file
@@ -0,0 +1,436 @@
|
|||||||
|
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||||
|
# Ported to gevent by Denis Bilenko.
|
||||||
|
"""SSL wrapper for socket objects on Python 2.7.8 and below.
|
||||||
|
|
||||||
|
For the documentation, refer to :mod:`ssl` module manual.
|
||||||
|
|
||||||
|
This module implements cooperative SSL socket wrappers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
# Our import magic sadly makes this warning useless
|
||||||
|
# pylint: disable=undefined-variable,arguments-differ,no-member
|
||||||
|
|
||||||
|
import ssl as __ssl__
|
||||||
|
|
||||||
|
_ssl = __ssl__._ssl
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import errno
|
||||||
|
from gevent._socket2 import socket
|
||||||
|
from gevent.socket import _fileobject, timeout_default
|
||||||
|
from gevent.socket import error as socket_error, EWOULDBLOCK
|
||||||
|
from gevent.socket import timeout as _socket_timeout
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
|
||||||
|
__implements__ = ['SSLSocket',
|
||||||
|
'wrap_socket',
|
||||||
|
'get_server_certificate',
|
||||||
|
'sslwrap_simple']
|
||||||
|
|
||||||
|
# Import all symbols from Python's ssl.py, except those that we are implementing
|
||||||
|
# and "private" symbols.
|
||||||
|
__imports__ = copy_globals(__ssl__, globals(),
|
||||||
|
# SSLSocket *must* subclass gevent.socket.socket; see issue 597
|
||||||
|
names_to_ignore=__implements__ + ['socket'],
|
||||||
|
dunder_names_to_keep=())
|
||||||
|
|
||||||
|
|
||||||
|
# Py2.6 can get RAND_status added twice
|
||||||
|
__all__ = list(set(__implements__) | set(__imports__))
|
||||||
|
if 'namedtuple' in __all__:
|
||||||
|
__all__.remove('namedtuple')
|
||||||
|
|
||||||
|
class SSLSocket(socket):
    """
    gevent `ssl.SSLSocket <https://docs.python.org/2.6/library/ssl.html#sslsocket-objects>`_
    for Pythons < 2.7.9.
    """

    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 suppress_ragged_eofs=True,
                 ciphers=None):
        socket.__init__(self, _sock=sock)

        if PYPY:
            # On PyPy, drop the extra reference the wrapped socket holds.
            sock._drop()

        if certfile and not keyfile:
            keyfile = certfile
        # see if it's connected
        try:
            socket.getpeername(self)
        except socket_error as e:
            if e.args[0] != errno.ENOTCONN:
                raise
            # no, no connection yet
            self._sslobj = None
        else:
            # yes, create the SSL object
            if ciphers is None:
                self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                            keyfile, certfile,
                                            cert_reqs, ssl_version, ca_certs)
            else:
                self._sslobj = _ssl.sslwrap(self._sock, server_side,
                                            keyfile, certfile,
                                            cert_reqs, ssl_version, ca_certs,
                                            ciphers)
            if do_handshake_on_connect:
                self.do_handshake()
        self.keyfile = keyfile
        self.certfile = certfile
        self.cert_reqs = cert_reqs
        self.ssl_version = ssl_version
        self.ca_certs = ca_certs
        self.ciphers = ciphers
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        # Reference count for file objects created by makefile(); close()
        # only really closes once this drops below 1.
        self._makefile_refs = 0

    def read(self, len=1024):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        # Retry loop: on WANT_READ/WANT_WRITE, cooperatively wait for the
        # corresponding event instead of blocking, unless non-blocking
        # (timeout == 0.0), in which case the SSLError propagates.
        while True:
            try:
                return self._sslobj.read(len)
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                    return ''
                elif ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    # note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
                    self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
                else:
                    raise

    def write(self, data):
        """Write DATA to the underlying SSL channel. Returns
        number of bytes of DATA actually transmitted."""
        while True:
            try:
                return self._sslobj.write(data)
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
                else:
                    raise

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""
        return self._sslobj.peer_certificate(binary_form)

    def cipher(self):
        # Returns the cipher tuple for the connection, or None when no SSL
        # object exists yet (not connected/wrapped).
        if not self._sslobj:
            return None
        return self._sslobj.cipher()

    def send(self, data, flags=0, timeout=timeout_default):
        # *timeout* defaults to the socket's own timeout when not given.
        if timeout is timeout_default:
            timeout = self.timeout
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    v = self._sslobj.write(data)
                except SSLError as x:
                    # In non-blocking mode, report 0 bytes sent rather
                    # than raising on WANT_READ/WANT_WRITE.
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        if self.timeout == 0.0:
                            return 0
                        sys.exc_clear()
                        self._wait(self._read_event)
                    elif x.args[0] == SSL_ERROR_WANT_WRITE:
                        if self.timeout == 0.0:
                            return 0
                        sys.exc_clear()
                        self._wait(self._write_event)
                    else:
                        raise
                else:
                    return v
        else:
            return socket.send(self, data, flags, timeout)
    # is it possible for sendall() to send some data without encryption if another end shut down SSL?

    def sendall(self, data, flags=0):
        try:
            socket.sendall(self, data)
        except _socket_timeout as ex:
            if self.timeout == 0.0:
                # Python 2 simply *hangs* in this case, which is bad, but
                # Python 3 raises SSLWantWriteError. We do the same.
                raise SSLError(SSL_ERROR_WANT_WRITE)
            # Convert the socket.timeout back to the sslerror
            raise SSLError(*ex.args)

    def sendto(self, *args):
        # Datagram-style sends make no sense on an SSL stream.
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.sendto(self, *args)

    def recv(self, buflen=1024, flags=0):
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            # QQQ Shouldn't we wrap the SSL_WANT_READ errors as socket.timeout errors to match socket.recv's behavior?
            return self.read(buflen)
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        # Default nbytes to the buffer size, or 1024 for an empty buffer.
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv_into() on %s" %
                    self.__class__)
            while True:
                try:
                    tmp_buffer = self.read(nbytes)
                    v = len(tmp_buffer)
                    buffer[:v] = tmp_buffer
                    return v
                except SSLError as x:
                    if x.args[0] == SSL_ERROR_WANT_READ:
                        if self.timeout == 0.0:
                            raise
                        sys.exc_clear()
                        self._wait(self._read_event)
                        continue
                    else:
                        raise
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, *args):
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, *args)

    def recvfrom_into(self, *args):
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, *args)

    def pending(self):
        # Number of already-decrypted bytes buffered inside the SSL object.
        if self._sslobj:
            return self._sslobj.pending()
        return 0

    def _sslobj_shutdown(self):
        # Drive the SSL shutdown handshake to completion cooperatively,
        # using the same WANT_READ/WANT_WRITE retry pattern as read/write.
        while True:
            try:
                return self._sslobj.shutdown()
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                    return ''
                elif ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
                else:
                    raise

    def unwrap(self):
        # Shut down the SSL layer and return the underlying raw socket.
        if self._sslobj:
            s = self._sslobj_shutdown()
            self._sslobj = None
            return socket(_sock=s)
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def shutdown(self, how):
        self._sslobj = None
        socket.shutdown(self, how)

    def close(self):
        # Only truly close once no makefile()-created file objects remain.
        if self._makefile_refs < 1:
            self._sslobj = None
            socket.close(self)
        else:
            self._makefile_refs -= 1

    if PYPY:
        # PyPy's socket implementation uses explicit reference counting
        # (_reuse/_drop) instead of the CPython _makefile_refs convention.

        def _reuse(self):
            self._makefile_refs += 1

        def _drop(self):
            if self._makefile_refs < 1:
                self.close()
            else:
                self._makefile_refs -= 1

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        while True:
            try:
                return self._sslobj.do_handshake()
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    sys.exc_clear()
                    self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)
                else:
                    raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call. We connect it, then wrap it.
        if self._sslobj:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        socket.connect(self, addr)
        if self.ciphers is None:
            self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
                                        self.cert_reqs, self.ssl_version,
                                        self.ca_certs)
        else:
            self._sslobj = _ssl.sslwrap(self._sock, False, self.keyfile, self.certfile,
                                        self.cert_reqs, self.ssl_version,
                                        self.ca_certs, self.ciphers)
        if self.do_handshake_on_connect:
            self.do_handshake()

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        sock = self._sock
        # Retry accept() cooperatively until a connection arrives.
        while True:
            try:
                client_socket, address = sock.accept()
                break
            except socket_error as ex:
                if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
                    raise
                sys.exc_clear()
                self._wait(self._read_event)

        # Wrap the accepted connection with the same SSL configuration
        # as this listening socket.
        sslobj = SSLSocket(client_socket,
                           keyfile=self.keyfile,
                           certfile=self.certfile,
                           server_side=True,
                           cert_reqs=self.cert_reqs,
                           ssl_version=self.ssl_version,
                           ca_certs=self.ca_certs,
                           do_handshake_on_connect=self.do_handshake_on_connect,
                           suppress_ragged_eofs=self.suppress_ragged_eofs,
                           ciphers=self.ciphers)

        return sslobj, address

    def makefile(self, mode='r', bufsize=-1):
        """Make and return a file-like object that
        works with the SSL connection. Just use the code
        from the socket module."""
        if not PYPY:
            self._makefile_refs += 1
        # close=True so as to decrement the reference count when done with
        # the file-like object.
        return _fileobject(self, mode, bufsize, close=True)
|
||||||
|
|
||||||
|
if PYPY or not hasattr(SSLSocket, 'timeout'):
    # PyPy (and certain versions of CPython) doesn't have a direct
    # 'timeout' property on raw sockets, because that's not part of
    # the documented specification. We may wind up wrapping a raw
    # socket (when ssl is used with PyWSGI) or a gevent socket, which
    # does have a read/write timeout property as an alias for
    # get/settimeout, so make sure that's always the case because
    # pywsgi can depend on that.
    SSLSocket.timeout = property(lambda self: self.gettimeout(),
                                 lambda self, value: self.settimeout(value))


# Shared exception instances used as timeout_exc values in the retry
# loops above, matching the messages the stdlib ssl module raises.
_SSLErrorReadTimeout = SSLError('The read operation timed out')
_SSLErrorWriteTimeout = SSLError('The write operation timed out')
_SSLErrorHandshakeTimeout = SSLError('The handshake operation timed out')
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True, ciphers=None):
    """Create a new :class:`SSLSocket` instance."""
    # Pure pass-through: every parameter is forwarded unchanged to the
    # SSLSocket constructor.
    return SSLSocket(sock, keyfile=keyfile, certfile=certfile,
                     server_side=server_side, cert_reqs=cert_reqs,
                     ssl_version=ssl_version, ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs,
                     ciphers=ciphers)
|
||||||
|
|
||||||
|
|
||||||
|
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""

    if ca_certs is not None:
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    s = wrap_socket(socket(), ssl_version=ssl_version,
                    cert_reqs=cert_reqs, ca_certs=ca_certs)
    # Fix: the original leaked the socket when connect() or getpeercert()
    # raised (e.g. unreachable host, failed cert validation); ensure the
    # socket is always closed.
    try:
        s.connect(addr)
        dercert = s.getpeercert(True)
    finally:
        s.close()
    return DER_cert_to_PEM_cert(dercert)
|
||||||
|
|
||||||
|
|
||||||
|
def sslwrap_simple(sock, keyfile=None, certfile=None):
    """A replacement for the old socket.ssl function. Designed
    for compatability with Python 2.5 and earlier. Will disappear in
    Python 3.0."""
    # Equivalent to the positional call SSLSocket(sock, keyfile, certfile);
    # all remaining SSLSocket options keep their defaults.
    return SSLSocket(sock, keyfile=keyfile, certfile=certfile)
|
||||||
661
python/gevent/_ssl3.py
Normal file
661
python/gevent/_ssl3.py
Normal file
@@ -0,0 +1,661 @@
|
|||||||
|
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||||
|
# Ported to gevent by Denis Bilenko.
|
||||||
|
"""SSL wrapper for socket objects on Python 3.
|
||||||
|
|
||||||
|
For the documentation, refer to :mod:`ssl` module manual.
|
||||||
|
|
||||||
|
This module implements cooperative SSL socket wrappers.
|
||||||
|
"""
|
||||||
|
# Our import magic sadly makes this warning useless
|
||||||
|
# pylint: disable=undefined-variable
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import ssl as __ssl__
|
||||||
|
|
||||||
|
_ssl = __ssl__._ssl # pylint:disable=no-member
|
||||||
|
|
||||||
|
import errno
|
||||||
|
from gevent.socket import socket, timeout_default
|
||||||
|
from gevent.socket import error as socket_error
|
||||||
|
from gevent.socket import timeout as _socket_timeout
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
from weakref import ref as _wref
|
||||||
|
|
||||||
|
__implements__ = [
|
||||||
|
'SSLContext',
|
||||||
|
'SSLSocket',
|
||||||
|
'wrap_socket',
|
||||||
|
'get_server_certificate',
|
||||||
|
]
|
||||||
|
|
||||||
|
# Import all symbols from Python's ssl.py, except those that we are implementing
|
||||||
|
# and "private" symbols.
|
||||||
|
__imports__ = copy_globals(__ssl__, globals(),
|
||||||
|
# SSLSocket *must* subclass gevent.socket.socket; see issue 597
|
||||||
|
names_to_ignore=__implements__ + ['socket'],
|
||||||
|
dunder_names_to_keep=())
|
||||||
|
|
||||||
|
__all__ = __implements__ + __imports__
|
||||||
|
if 'namedtuple' in __all__:
|
||||||
|
__all__.remove('namedtuple')
|
||||||
|
|
||||||
|
orig_SSLContext = __ssl__.SSLContext # pylint:disable=no-member
|
||||||
|
|
||||||
|
|
||||||
|
class SSLContext(orig_SSLContext):
    """gevent subclass of :class:`ssl.SSLContext` whose ``wrap_socket``
    produces cooperative :class:`SSLSocket` instances."""

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None,
                    session=None):
        # pylint:disable=arguments-differ
        # (3.6 adds session)
        # Sadly, using *args and **kwargs doesn't work
        return SSLSocket(sock=sock, server_side=server_side,
                         do_handshake_on_connect=do_handshake_on_connect,
                         suppress_ragged_eofs=suppress_ragged_eofs,
                         server_hostname=server_hostname,
                         _context=self,
                         _session=session)

    if not hasattr(orig_SSLContext, 'check_hostname'):
        # Python 3.3 lacks this
        check_hostname = False

    if hasattr(orig_SSLContext.options, 'setter'):
        # In 3.6, these became properties. They want to access the
        # property __set__ method in the superclass, and they do so by using
        # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
        # patch, which causes infinite recursion.
        # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
        # pylint:disable=no-member
        @orig_SSLContext.options.setter
        def options(self, value):
            super(orig_SSLContext, orig_SSLContext).options.__set__(self, value)

        @orig_SSLContext.verify_flags.setter
        def verify_flags(self, value):
            super(orig_SSLContext, orig_SSLContext).verify_flags.__set__(self, value)

        @orig_SSLContext.verify_mode.setter
        def verify_mode(self, value):
            super(orig_SSLContext, orig_SSLContext).verify_mode.__set__(self, value)
|
||||||
|
|
||||||
|
|
||||||
|
class _contextawaresock(socket._gevent_sock_class): # Python 2: pylint:disable=slots-on-old-class
    # We have to pass the raw stdlib socket to SSLContext.wrap_socket.
    # That method in turn can pass that object on to things like SNI callbacks.
    # It wouldn't have access to any of the attributes on the SSLSocket, like
    # context, that it's supposed to (see test_ssl.test_sni_callback). Our
    # solution is to keep a weak reference to the SSLSocket on the raw
    # socket and delegate.

    # We keep it in a slot to avoid having the ability to set any attributes
    # we're not prepared for (because we don't know what to delegate.)

    # _sslsock is a weakref.ref to the owning SSLSocket; calling it yields
    # the SSLSocket (or None once it has been collected).
    __slots__ = ('_sslsock',)

    @property
    def context(self):
        return self._sslsock().context

    @context.setter
    def context(self, ctx):
        self._sslsock().context = ctx

    @property
    def session(self):
        """The SSLSession for client socket."""
        return self._sslsock().session

    @session.setter
    def session(self, session):
        self._sslsock().session = session

    def __getattr__(self, name):
        # Delegate any other attribute to the owning SSLSocket.
        try:
            return getattr(self._sslsock(), name)
        except RuntimeError:
            # XXX: If the attribute doesn't exist,
            # we infinitely recurse
            pass
        raise AttributeError(name)
|
||||||
|
|
||||||
|
|
||||||
|
class SSLSocket(socket):
    """
    gevent `ssl.SSLSocket <https://docs.python.org/3/library/ssl.html#ssl-sockets>`_
    for Python 3.

    Mirrors the stdlib SSLSocket, but every potentially-blocking SSL
    operation loops on SSLWantReadError/SSLWantWriteError and yields to
    the gevent hub via ``self._wait(...)`` instead of blocking the thread.
    """

    # pylint:disable=too-many-instance-attributes,too-many-public-methods

    # The raw socket class used underneath; it keeps a weakref back to
    # this SSLSocket so SNI callbacks can reach our attributes.
    _gevent_sock_class = _contextawaresock

    def __init__(self, sock=None, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True,
                 family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
                 suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
                 server_hostname=None,
                 _session=None, # 3.6
                 _context=None):
        # pylint:disable=too-many-locals,too-many-statements,too-many-branches
        # Either adopt the caller-provided context, or synthesize one from
        # the legacy keyword arguments (mirroring stdlib wrap_socket()).
        if _context:
            self._context = _context
        else:
            if server_side and not certfile:
                raise ValueError("certfile must be specified for server-side "
                                 "operations")
            if keyfile and not certfile:
                raise ValueError("certfile must be specified")
            if certfile and not keyfile:
                keyfile = certfile
            self._context = SSLContext(ssl_version)
            self._context.verify_mode = cert_reqs
            if ca_certs:
                self._context.load_verify_locations(ca_certs)
            if certfile:
                self._context.load_cert_chain(certfile, keyfile)
            if npn_protocols:
                self._context.set_npn_protocols(npn_protocols)
            if ciphers:
                self._context.set_ciphers(ciphers)
            self.keyfile = keyfile
            self.certfile = certfile
            self.cert_reqs = cert_reqs
            self.ssl_version = ssl_version
            self.ca_certs = ca_certs
            self.ciphers = ciphers
        # Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
        # mixed in.
        if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
            raise NotImplementedError("only stream sockets are supported")
        if server_side:
            if server_hostname:
                raise ValueError("server_hostname can only be specified "
                                 "in client mode")
            if _session is not None:
                raise ValueError("session can only be specified "
                                 "in client mode")
        if self._context.check_hostname and not server_hostname:
            raise ValueError("check_hostname requires server_hostname")
        self._session = _session
        self.server_side = server_side
        self.server_hostname = server_hostname
        self.do_handshake_on_connect = do_handshake_on_connect
        self.suppress_ragged_eofs = suppress_ragged_eofs
        connected = False
        if sock is not None:
            # Take over the file descriptor of the wrapped socket.
            socket.__init__(self,
                            family=sock.family,
                            type=sock.type,
                            proto=sock.proto,
                            fileno=sock.fileno())
            self.settimeout(sock.gettimeout())
            # see if it's connected
            try:
                sock.getpeername()
            except socket_error as e:
                if e.errno != errno.ENOTCONN:
                    raise
            else:
                connected = True
            # The fd now belongs to us; detach it from the original.
            sock.detach()
        elif fileno is not None:
            socket.__init__(self, fileno=fileno)
        else:
            socket.__init__(self, family=family, type=type, proto=proto)

        # Weak back-reference so _contextawaresock can delegate to us.
        self._sock._sslsock = _wref(self)
        self._closed = False
        self._sslobj = None
        self._connected = connected
        if connected:
            # create the SSL object
            try:
                self._sslobj = self._context._wrap_socket(self._sock, server_side,
                                                          server_hostname)
                if _session is not None: # 3.6
                    self._sslobj = SSLObject(self._sslobj, owner=self, session=self._session)
                if do_handshake_on_connect:
                    timeout = self.gettimeout()
                    if timeout == 0.0:
                        # non-blocking
                        raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
                    self.do_handshake()

            except socket_error as x:
                self.close()
                raise x

    @property
    def context(self):
        """The SSLContext this socket was created from (or for)."""
        return self._context

    @context.setter
    def context(self, ctx):
        self._context = ctx
        self._sslobj.context = ctx

    @property
    def session(self):
        """The SSLSession for client socket."""
        if self._sslobj is not None:
            return self._sslobj.session

    @session.setter
    def session(self, session):
        self._session = session
        if self._sslobj is not None:
            self._sslobj.session = session

    @property
    def session_reused(self):
        """Was the client session reused during handshake"""
        if self._sslobj is not None:
            return self._sslobj.session_reused

    def dup(self):
        # An SSLSocket's state cannot be meaningfully duplicated.
        raise NotImplementedError("Can't dup() %s instances" %
                                  self.__class__.__name__)

    def _checkClosed(self, msg=None):
        # raise an exception here if you wish to check for spurious closes
        pass

    def _check_connected(self):
        if not self._connected:
            # getpeername() will raise ENOTCONN if the socket is really
            # not connected; note that we can be connected even without
            # _connected being set, e.g. if connect() first returned
            # EAGAIN.
            self.getpeername()

    def read(self, len=1024, buffer=None):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        # pylint:disable=too-many-branches
        self._checkClosed()

        while True:
            if not self._sslobj:
                raise ValueError("Read on closed or unwrapped SSL socket.")
            if len == 0:
                return b'' if buffer is None else 0
            # Negative lengths are handled natively when the buffer is None
            # to raise a ValueError
            try:
                if buffer is not None:
                    return self._sslobj.read(len, buffer)
                return self._sslobj.read(len or 1024)
            except SSLWantReadError:
                if self.timeout == 0.0:
                    raise
                # Cooperatively wait for readability, then retry the loop.
                self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
            except SSLWantWriteError:
                if self.timeout == 0.0:
                    raise
                # note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
                self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
            except SSLError as ex:
                if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                    # Peer closed without a proper SSL shutdown; treat as EOF.
                    if buffer is None:
                        return b''
                    return 0
                else:
                    raise

    def write(self, data):
        """Write DATA to the underlying SSL channel. Returns
        number of bytes of DATA actually transmitted."""
        self._checkClosed()

        while True:
            if not self._sslobj:
                raise ValueError("Write on closed or unwrapped SSL socket.")

            try:
                return self._sslobj.write(data)
            except SSLError as ex:
                # Renegotiation can make a write want a read (and vice
                # versa); wait for the right event and retry.
                if ex.args[0] == SSL_ERROR_WANT_READ:
                    if self.timeout == 0.0:
                        raise
                    self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
                elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                    if self.timeout == 0.0:
                        raise
                    self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
                else:
                    raise

    def getpeercert(self, binary_form=False):
        """Returns a formatted version of the data in the
        certificate provided by the other end of the SSL channel.
        Return None if no certificate was provided, {} if a
        certificate was provided, but not validated."""

        self._checkClosed()
        self._check_connected()
        try:
            # Older _ssl exposes this as a property-producing attribute.
            c = self._sslobj.peer_certificate
        except AttributeError:
            # 3.6
            c = self._sslobj.getpeercert

        return c(binary_form)

    def selected_npn_protocol(self):
        """Return the protocol negotiated via NPN, or None."""
        self._checkClosed()
        if not self._sslobj or not _ssl.HAS_NPN:
            return None
        return self._sslobj.selected_npn_protocol()

    if hasattr(_ssl, 'HAS_ALPN'):
        # 3.5+
        def selected_alpn_protocol(self):
            """Return the protocol negotiated via ALPN, or None."""
            self._checkClosed()
            if not self._sslobj or not _ssl.HAS_ALPN: # pylint:disable=no-member
                return None
            return self._sslobj.selected_alpn_protocol()

    def shared_ciphers(self):
        """Return a list of ciphers shared by the client during the handshake or
        None if this is not a valid server connection.
        """
        return self._sslobj.shared_ciphers()

    def version(self):
        """Return a string identifying the protocol version used by the
        current SSL channel. """
        if not self._sslobj:
            return None
        return self._sslobj.version()

    # We inherit sendfile from super(); it always uses `send`

    def cipher(self):
        """Return the (name, protocol, bits) of the current cipher, or None."""
        self._checkClosed()
        if not self._sslobj:
            return None
        return self._sslobj.cipher()

    def compression(self):
        """Return the compression algorithm in use, or None."""
        self._checkClosed()
        if not self._sslobj:
            return None
        return self._sslobj.compression()

    def send(self, data, flags=0, timeout=timeout_default):
        """Send *data* over the SSL channel; flags must be zero when wrapped."""
        self._checkClosed()
        if timeout is timeout_default:
            timeout = self.timeout
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to send() on %s" %
                    self.__class__)
            while True:
                try:
                    return self._sslobj.write(data)
                except SSLWantReadError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._read_event)
                except SSLWantWriteError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._write_event)
        else:
            # Not wrapped (e.g. after unwrap()); plain socket send.
            return socket.send(self, data, flags, timeout)

    def sendto(self, data, flags_or_addr, addr=None):
        # Only valid on an unwrapped socket; SSL is connection-oriented.
        self._checkClosed()
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        elif addr is None:
            return socket.sendto(self, data, flags_or_addr)
        else:
            return socket.sendto(self, data, flags_or_addr, addr)

    def sendmsg(self, *args, **kwargs):
        # Ensure programs don't send data unencrypted if they try to
        # use this method.
        raise NotImplementedError("sendmsg not allowed on instances of %s" %
                                  self.__class__)

    def sendall(self, data, flags=0):
        """Send all of *data*; flags must be zero when SSL is active."""
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)

        try:
            return socket.sendall(self, data, flags)
        except _socket_timeout:
            if self.timeout == 0.0:
                # Raised by the stdlib on non-blocking sockets
                raise SSLWantWriteError("The operation did not complete (write)")
            raise

    def recv(self, buflen=1024, flags=0):
        """Receive up to *buflen* bytes; delegates to read() when wrapped."""
        self._checkClosed()
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to recv() on %s" %
                    self.__class__)
            if buflen == 0:
                # https://github.com/python/cpython/commit/00915577dd84ba75016400793bf547666e6b29b5
                # Python #23804
                return b''
            return self.read(buflen)
        else:
            return socket.recv(self, buflen, flags)

    def recv_into(self, buffer, nbytes=None, flags=0):
        """Receive into *buffer*; nbytes defaults to the buffer length."""
        self._checkClosed()
        if buffer and (nbytes is None):
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        if self._sslobj:
            if flags != 0:
                raise ValueError("non-zero flags not allowed in calls to recv_into() on %s" % self.__class__)
            return self.read(nbytes, buffer)
        else:
            return socket.recv_into(self, buffer, nbytes, flags)

    def recvfrom(self, buflen=1024, flags=0):
        # Only valid on an unwrapped socket.
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom(self, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        # Only valid on an unwrapped socket.
        self._checkClosed()
        if self._sslobj:
            raise ValueError("recvfrom_into not allowed on instances of %s" %
                             self.__class__)
        else:
            return socket.recvfrom_into(self, buffer, nbytes, flags)

    def recvmsg(self, *args, **kwargs):
        raise NotImplementedError("recvmsg not allowed on instances of %s" %
                                  self.__class__)

    def recvmsg_into(self, *args, **kwargs):
        raise NotImplementedError("recvmsg_into not allowed on instances of "
                                  "%s" % self.__class__)

    def pending(self):
        """Return the number of decrypted bytes buffered and readable now."""
        self._checkClosed()
        if self._sslobj:
            return self._sslobj.pending()
        return 0

    def shutdown(self, how):
        """Shut down the transport; drops the SSL object without SSL shutdown."""
        self._checkClosed()
        self._sslobj = None
        socket.shutdown(self, how)

    def unwrap(self):
        """Perform the SSL shutdown handshake and return the bare socket."""
        if self._sslobj:
            while True:
                try:
                    s = self._sslobj.shutdown()
                    break
                except SSLWantReadError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._read_event)
                except SSLWantWriteError:
                    if self.timeout == 0.0:
                        return 0
                    self._wait(self._write_event)

            self._sslobj = None
            # The return value of shutting down the SSLObject is the
            # original wrapped socket, i.e., _contextawaresock. But that
            # object doesn't have the gevent wrapper around it so it can't
            # be used. We have to wrap it back up with a gevent wrapper.
            sock = socket(family=s.family, type=s.type, proto=s.proto, fileno=s.fileno())
            s.detach()
            return sock
        else:
            raise ValueError("No SSL wrapper around " + str(self))

    def _real_close(self):
        # Release the SSL object before closing the underlying socket.
        self._sslobj = None
        # self._closed = True
        socket._real_close(self)

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        self._check_connected()
        while True:
            try:
                self._sslobj.do_handshake()
                break
            except SSLWantReadError:
                if self.timeout == 0.0:
                    raise
                self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
            except SSLWantWriteError:
                if self.timeout == 0.0:
                    raise
                self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)

        # Hostname verification happens after the handshake completes.
        if self._context.check_hostname:
            if not self.server_hostname:
                raise ValueError("check_hostname needs server_hostname "
                                 "argument")
            match_hostname(self.getpeercert(), self.server_hostname)

    def _real_connect(self, addr, connect_ex):
        if self.server_side:
            raise ValueError("can't connect in server-side mode")
        # Here we assume that the socket is client-side, and not
        # connected at the time of the call. We connect it, then wrap it.
        if self._connected:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname)
        if self._session is not None: # 3.6
            self._sslobj = SSLObject(self._sslobj, owner=self, session=self._session)
        try:
            if connect_ex:
                rc = socket.connect_ex(self, addr)
            else:
                rc = None
                socket.connect(self, addr)
            if not rc:
                if self.do_handshake_on_connect:
                    self.do_handshake()
                self._connected = True
            return rc
        except socket_error:
            # Connect failed: drop the half-initialized SSL object.
            self._sslobj = None
            raise

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        self._real_connect(addr, False)

    def connect_ex(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        return self._real_connect(addr, True)

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""

        newsock, addr = socket.accept(self)
        newsock = self._context.wrap_socket(newsock,
                                            do_handshake_on_connect=self.do_handshake_on_connect,
                                            suppress_ragged_eofs=self.suppress_ragged_eofs,
                                            server_side=True)
        return newsock, addr

    def get_channel_binding(self, cb_type="tls-unique"):
        """Get channel binding data for current connection. Raise ValueError
        if the requested `cb_type` is not supported. Return bytes of the data
        or None if the data is not available (e.g. before the handshake).
        """
        if cb_type not in CHANNEL_BINDING_TYPES:
            raise ValueError("Unsupported channel binding type")
        if cb_type != "tls-unique":
            raise NotImplementedError("{0} channel binding type not implemented".format(cb_type))
        if self._sslobj is None:
            return None
        return self._sslobj.tls_unique_cb()
|
||||||
|
|
||||||
|
|
||||||
|
# Python 3.2 onwards raise normal timeout errors, not SSLError.
# See https://bugs.python.org/issue10272
# These pre-built exceptions are passed as ``timeout_exc`` to the
# cooperative waits in SSLSocket.read/write/do_handshake so a timed-out
# wait raises the same message the stdlib would.
_SSLErrorReadTimeout = _socket_timeout('The read operation timed out')
_SSLErrorWriteTimeout = _socket_timeout('The write operation timed out')
_SSLErrorHandshakeTimeout = _socket_timeout('The handshake operation timed out')
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True,
                ciphers=None):
    """Wrap *sock* in a gevent :class:`SSLSocket`.

    This is a thin convenience wrapper that forwards every argument
    unchanged to the :class:`SSLSocket` constructor, mirroring the
    legacy :func:`ssl.wrap_socket` interface.
    """
    return SSLSocket(
        sock=sock,
        keyfile=keyfile,
        certfile=certfile,
        server_side=server_side,
        cert_reqs=cert_reqs,
        ssl_version=ssl_version,
        ca_certs=ca_certs,
        do_handshake_on_connect=do_handshake_on_connect,
        suppress_ragged_eofs=suppress_ragged_eofs,
        ciphers=ciphers,
    )
|
||||||
|
|
||||||
|
|
||||||
|
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""

    # Unpacking asserts that addr is a (host, port) pair.
    _, _ = addr
    # Only verify the peer when the caller supplied CA material.
    cert_reqs = CERT_REQUIRED if ca_certs is not None else CERT_NONE
    s = wrap_socket(create_connection(addr), ssl_version=ssl_version,
                    cert_reqs=cert_reqs, ca_certs=ca_certs)
    try:
        # DER (binary) form; converted to PEM below.
        dercert = s.getpeercert(True)
    finally:
        # Always release the connection, even if getpeercert raises
        # (the original version leaked the socket on error).
        s.close()
    return DER_cert_to_PEM_cert(dercert)
|
||||||
714
python/gevent/_sslgte279.py
Normal file
714
python/gevent/_sslgte279.py
Normal file
@@ -0,0 +1,714 @@
|
|||||||
|
# Wrapper module for _ssl. Written by Bill Janssen.
|
||||||
|
# Ported to gevent by Denis Bilenko.
|
||||||
|
"""SSL wrapper for socket objects on Python 2.7.9 and above.
|
||||||
|
|
||||||
|
For the documentation, refer to :mod:`ssl` module manual.
|
||||||
|
|
||||||
|
This module implements cooperative SSL socket wrappers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
# Our import magic sadly makes this warning useless
|
||||||
|
# pylint: disable=undefined-variable
|
||||||
|
# pylint: disable=too-many-instance-attributes,too-many-locals,too-many-statements,too-many-branches
|
||||||
|
# pylint: disable=arguments-differ,too-many-public-methods
|
||||||
|
|
||||||
|
import ssl as __ssl__
|
||||||
|
|
||||||
|
_ssl = __ssl__._ssl # pylint:disable=no-member
|
||||||
|
|
||||||
|
import errno
|
||||||
|
from gevent._socket2 import socket
|
||||||
|
from gevent.socket import timeout_default
|
||||||
|
from gevent.socket import create_connection
|
||||||
|
from gevent.socket import error as socket_error
|
||||||
|
from gevent.socket import timeout as _socket_timeout
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
# Names this module re-implements cooperatively; everything else is
# copied straight from the stdlib ``ssl`` module below.
__implements__ = [
    'SSLContext',
    'SSLSocket',
    'wrap_socket',
    'get_server_certificate',
    'create_default_context',
    '_create_unverified_context',
    '_create_default_https_context',
    '_create_stdlib_context',
]

# Import all symbols from Python's ssl.py, except those that we are implementing
# and "private" symbols.
__imports__ = copy_globals(__ssl__, globals(),
                           # SSLSocket *must* subclass gevent.socket.socket; see issue 597 and 801
                           names_to_ignore=__implements__ + ['socket', 'create_connection'],
                           dunder_names_to_keep=())

try:
    _delegate_methods
except NameError: # PyPy doesn't expose this detail
    _delegate_methods = ('recv', 'recvfrom', 'recv_into', 'recvfrom_into', 'send', 'sendto')

__all__ = __implements__ + __imports__
# ``namedtuple`` can leak in via the stdlib copy; it is not part of the
# public ssl API, so drop it from __all__.
if 'namedtuple' in __all__:
    __all__.remove('namedtuple')

# The stdlib SSLContext we subclass; kept under a distinct name because
# copy_globals above may also have bound ``SSLContext``-adjacent names.
orig_SSLContext = __ssl__.SSLContext # pylint: disable=no-member
|
||||||
|
|
||||||
|
|
||||||
|
class SSLContext(orig_SSLContext):
    """gevent-cooperative subclass of :class:`ssl.SSLContext`.

    Identical to the stdlib context except that :meth:`wrap_socket`
    produces this module's gevent :class:`SSLSocket` instead of the
    stdlib one.
    """

    def wrap_socket(self, sock, server_side=False,
                    do_handshake_on_connect=True,
                    suppress_ragged_eofs=True,
                    server_hostname=None):
        """Wrap *sock* in a gevent :class:`SSLSocket` bound to this context."""
        return SSLSocket(sock=sock,
                         server_side=server_side,
                         do_handshake_on_connect=do_handshake_on_connect,
                         suppress_ragged_eofs=suppress_ragged_eofs,
                         server_hostname=server_hostname,
                         _context=self)
|
||||||
|
|
||||||
|
|
||||||
|
def create_default_context(purpose=Purpose.SERVER_AUTH, cafile=None,
                           capath=None, cadata=None):
    """Create a SSLContext object with default settings.

    NOTE: The protocol and settings may change anytime without prior
    deprecation. The values represent a fair balance between maximum
    compatibility and security.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(PROTOCOL_SSLv23)

    # SSLv2 is considered harmful; SSLv3 has problematic security and is
    # only required for really old clients such as IE6 on Windows XP.
    # Compression is disabled (on OpenSSL 1.0+) to prevent CRIME attacks.
    context.options |= (OP_NO_SSLv2
                        | OP_NO_SSLv3
                        | getattr(_ssl, "OP_NO_COMPRESSION", 0))

    if purpose == Purpose.SERVER_AUTH:
        # Client mode: verify certs and the host name.
        context.verify_mode = CERT_REQUIRED
        context.check_hostname = True # pylint: disable=attribute-defined-outside-init
    elif purpose == Purpose.CLIENT_AUTH:
        # Server mode: prefer the server's ciphers for stronger
        # encryption, and use single-use (EC)DH keys to improve forward
        # secrecy.
        context.options |= (getattr(_ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
                            | getattr(_ssl, "OP_SINGLE_DH_USE", 0)
                            | getattr(_ssl, "OP_SINGLE_ECDH_USE", 0))
        # disallow ciphers with known vulnerabilities
        context.set_ciphers(_RESTRICTED_SERVER_CIPHERS)

    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # no explicit cafile, capath or cadata but the verify mode is
        # CERT_OPTIONAL or CERT_REQUIRED. Let's try to load default system
        # root CA certificates for the given purpose. This may fail silently.
        context.load_default_certs(purpose)
    return context
|
||||||
|
|
||||||
|
def _create_unverified_context(protocol=PROTOCOL_SSLv23, cert_reqs=None,
                               check_hostname=False, purpose=Purpose.SERVER_AUTH,
                               certfile=None, keyfile=None,
                               cafile=None, capath=None, cadata=None):
    """Create a SSLContext object for Python stdlib modules

    All Python stdlib modules shall use this function to create SSLContext
    objects in order to keep common settings in one place. The configuration
    is less restrict than create_default_context()'s to increase backward
    compatibility.
    """
    if not isinstance(purpose, _ASN1Object):
        raise TypeError(purpose)

    context = SSLContext(protocol)
    # SSLv2 is considered harmful; SSLv3 has problematic security and is
    # only required for really old clients such as IE6 on Windows XP.
    context.options |= OP_NO_SSLv2 | OP_NO_SSLv3

    if cert_reqs is not None:
        context.verify_mode = cert_reqs
    context.check_hostname = check_hostname # pylint: disable=attribute-defined-outside-init

    if keyfile and not certfile:
        raise ValueError("certfile must be specified")
    if certfile or keyfile:
        context.load_cert_chain(certfile, keyfile)

    # load CA root certs
    if cafile or capath or cadata:
        context.load_verify_locations(cafile, capath, cadata)
    elif context.verify_mode != CERT_NONE:
        # Verification is on but no CA material was given: try the system
        # default root CA certificates for this purpose (may fail silently).
        context.load_default_certs(purpose)

    return context
|
||||||
|
|
||||||
|
# Used by http.client if no context is explicitly passed.
_create_default_https_context = create_default_context


# Backwards compatibility alias, even though it's not a public name.
# NOTE(review): stdlib code imports this private name; keep both aliases
# in lockstep with the functions above.
_create_stdlib_context = _create_unverified_context
|
||||||
|
|
||||||
|
class SSLSocket(socket):
|
||||||
|
"""
|
||||||
|
gevent `ssl.SSLSocket <https://docs.python.org/2/library/ssl.html#ssl-sockets>`_
|
||||||
|
for Pythons >= 2.7.9 but less than 3.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, sock=None, keyfile=None, certfile=None,
|
||||||
|
server_side=False, cert_reqs=CERT_NONE,
|
||||||
|
ssl_version=PROTOCOL_SSLv23, ca_certs=None,
|
||||||
|
do_handshake_on_connect=True,
|
||||||
|
family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None,
|
||||||
|
suppress_ragged_eofs=True, npn_protocols=None, ciphers=None,
|
||||||
|
server_hostname=None,
|
||||||
|
_context=None):
|
||||||
|
# fileno is ignored
|
||||||
|
# pylint: disable=unused-argument
|
||||||
|
if _context:
|
||||||
|
self._context = _context
|
||||||
|
else:
|
||||||
|
if server_side and not certfile:
|
||||||
|
raise ValueError("certfile must be specified for server-side "
|
||||||
|
"operations")
|
||||||
|
if keyfile and not certfile:
|
||||||
|
raise ValueError("certfile must be specified")
|
||||||
|
if certfile and not keyfile:
|
||||||
|
keyfile = certfile
|
||||||
|
self._context = SSLContext(ssl_version)
|
||||||
|
self._context.verify_mode = cert_reqs
|
||||||
|
if ca_certs:
|
||||||
|
self._context.load_verify_locations(ca_certs)
|
||||||
|
if certfile:
|
||||||
|
self._context.load_cert_chain(certfile, keyfile)
|
||||||
|
if npn_protocols:
|
||||||
|
self._context.set_npn_protocols(npn_protocols)
|
||||||
|
if ciphers:
|
||||||
|
self._context.set_ciphers(ciphers)
|
||||||
|
self.keyfile = keyfile
|
||||||
|
self.certfile = certfile
|
||||||
|
self.cert_reqs = cert_reqs
|
||||||
|
self.ssl_version = ssl_version
|
||||||
|
self.ca_certs = ca_certs
|
||||||
|
self.ciphers = ciphers
|
||||||
|
# Can't use sock.type as other flags (such as SOCK_NONBLOCK) get
|
||||||
|
# mixed in.
|
||||||
|
if sock.getsockopt(SOL_SOCKET, SO_TYPE) != SOCK_STREAM:
|
||||||
|
raise NotImplementedError("only stream sockets are supported")
|
||||||
|
|
||||||
|
if PYPY:
|
||||||
|
socket.__init__(self, _sock=sock)
|
||||||
|
sock._drop()
|
||||||
|
else:
|
||||||
|
# CPython: XXX: Must pass the underlying socket, not our
|
||||||
|
# potential wrapper; test___example_servers fails the SSL test
|
||||||
|
# with a client-side EOF error. (Why?)
|
||||||
|
socket.__init__(self, _sock=sock._sock)
|
||||||
|
|
||||||
|
# The initializer for socket overrides the methods send(), recv(), etc.
|
||||||
|
# in the instance, which we don't need -- but we want to provide the
|
||||||
|
# methods defined in SSLSocket.
|
||||||
|
for attr in _delegate_methods:
|
||||||
|
try:
|
||||||
|
delattr(self, attr)
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
if server_side and server_hostname:
|
||||||
|
raise ValueError("server_hostname can only be specified "
|
||||||
|
"in client mode")
|
||||||
|
if self._context.check_hostname and not server_hostname:
|
||||||
|
raise ValueError("check_hostname requires server_hostname")
|
||||||
|
self.server_side = server_side
|
||||||
|
self.server_hostname = server_hostname
|
||||||
|
self.do_handshake_on_connect = do_handshake_on_connect
|
||||||
|
self.suppress_ragged_eofs = suppress_ragged_eofs
|
||||||
|
self.settimeout(sock.gettimeout())
|
||||||
|
|
||||||
|
# See if we are connected
|
||||||
|
try:
|
||||||
|
self.getpeername()
|
||||||
|
except socket_error as e:
|
||||||
|
if e.errno != errno.ENOTCONN:
|
||||||
|
raise
|
||||||
|
connected = False
|
||||||
|
else:
|
||||||
|
connected = True
|
||||||
|
|
||||||
|
self._makefile_refs = 0
|
||||||
|
self._closed = False
|
||||||
|
self._sslobj = None
|
||||||
|
self._connected = connected
|
||||||
|
if connected:
|
||||||
|
# create the SSL object
|
||||||
|
try:
|
||||||
|
self._sslobj = self._context._wrap_socket(self._sock, server_side,
|
||||||
|
server_hostname, ssl_sock=self)
|
||||||
|
if do_handshake_on_connect:
|
||||||
|
timeout = self.gettimeout()
|
||||||
|
if timeout == 0.0:
|
||||||
|
# non-blocking
|
||||||
|
raise ValueError("do_handshake_on_connect should not be specified for non-blocking sockets")
|
||||||
|
self.do_handshake()
|
||||||
|
|
||||||
|
except socket_error as x:
|
||||||
|
self.close()
|
||||||
|
raise x
|
||||||
|
|
||||||
|
|
||||||
|
@property
def context(self):
    # The SSLContext this socket is currently bound to.
    return self._context
|
||||||
|
|
||||||
|
@context.setter
def context(self, ctx):
    # Rebind both our own reference and the low-level _ssl object's
    # context so future operations use the new settings.
    self._context = ctx
    self._sslobj.context = ctx
|
||||||
|
|
||||||
|
def dup(self):
    """Refuse to duplicate this socket.

    An SSL session is tied to a single file descriptor, so copying
    the wrapper is not meaningful.
    """
    msg = "Can't dup() %s instances" % self.__class__.__name__
    raise NotImplementedError(msg)
|
||||||
|
|
||||||
|
def _checkClosed(self, msg=None):
    """Hook for detecting spurious closes; intentionally a no-op here."""
    # The stdlib version can raise when the socket is closed; gevent's
    # cooperative operations detect closure themselves, so nothing to do.
    return None
|
||||||
|
|
||||||
|
def _check_connected(self):
    # Raise a socket error if the transport is definitely not connected.
    if not self._connected:
        # getpeername() will raise ENOTCONN if the socket is really
        # not connected; note that we can be connected even without
        # _connected being set, e.g. if connect() first returned
        # EAGAIN.
        self.getpeername()
|
||||||
|
|
||||||
|
def read(self, len=1024, buffer=None):
    """Read up to LEN bytes and return them.
    Return zero-length string on EOF."""
    # Loop until OpenSSL yields data (or EOF), cooperatively parking on
    # the gevent hub whenever the SSL layer reports it first needs the
    # underlying socket to become readable/writable.
    self._checkClosed()

    while 1:
        if not self._sslobj:
            raise ValueError("Read on closed or unwrapped SSL socket.")
        if len == 0:
            # Zero-length request: answer without touching OpenSSL.
            return b'' if buffer is None else 0
        if len < 0 and buffer is None:
            # This is handled natively in python 2.7.12+
            raise ValueError("Negative read length")
        try:
            if buffer is not None:
                return self._sslobj.read(len, buffer)
            return self._sslobj.read(len or 1024)
        except SSLWantReadError:
            if self.timeout == 0.0:
                # Non-blocking socket: propagate to the caller.
                raise
            self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
        except SSLWantWriteError:
            if self.timeout == 0.0:
                raise
            # note: using _SSLErrorReadTimeout rather than _SSLErrorWriteTimeout below is intentional
            self._wait(self._write_event, timeout_exc=_SSLErrorReadTimeout)
        except SSLError as ex:
            if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                # Peer closed without a proper SSL shutdown; treat as EOF.
                if buffer is not None:
                    return 0
                return b''
            else:
                raise
|
||||||
|
|
||||||
|
def write(self, data):
    """Write DATA to the underlying SSL channel. Returns
    number of bytes of DATA actually transmitted."""
    self._checkClosed()

    # Retry the SSL write, parking on the hub whenever OpenSSL asks for
    # the socket to become readable/writable (e.g. during renegotiation).
    while 1:
        if not self._sslobj:
            raise ValueError("Write on closed or unwrapped SSL socket.")

        try:
            return self._sslobj.write(data)
        except SSLError as ex:
            if ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    # Non-blocking: let the caller retry.
                    raise
                self._wait(self._read_event, timeout_exc=_SSLErrorWriteTimeout)
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    raise
                self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
            else:
                raise
|
||||||
|
|
||||||
|
def getpeercert(self, binary_form=False):
    """Returns a formatted version of the data in the
    certificate provided by the other end of the SSL channel.
    Return None if no certificate was provided, {} if a
    certificate was provided, but not validated."""

    self._checkClosed()
    # Requires an established connection; raises otherwise.
    self._check_connected()
    return self._sslobj.peer_certificate(binary_form)
|
||||||
|
|
||||||
|
def selected_npn_protocol(self):
    """Return the protocol negotiated via NPN, or None if NPN is
    unavailable or no SSL object exists yet."""
    self._checkClosed()
    if not self._sslobj or not _ssl.HAS_NPN:
        return None
    return self._sslobj.selected_npn_protocol()
|
||||||
|
|
||||||
|
if hasattr(_ssl, 'HAS_ALPN'):
    # 2.7.10+
    def selected_alpn_protocol(self):
        """Return the protocol negotiated via ALPN, or None if ALPN is
        unavailable or no SSL object exists yet."""
        self._checkClosed()
        if not self._sslobj or not _ssl.HAS_ALPN: # pylint:disable=no-member
            return None
        return self._sslobj.selected_alpn_protocol()
|
||||||
|
|
||||||
|
def cipher(self):
    """Return the active cipher description tuple, or None when no
    SSL object is attached."""
    self._checkClosed()
    sslobj = self._sslobj
    return sslobj.cipher() if sslobj else None
|
||||||
|
|
||||||
|
def compression(self):
    """Return the compression algorithm in use, or None when no
    SSL object is attached."""
    self._checkClosed()
    sslobj = self._sslobj
    return sslobj.compression() if sslobj else None
|
||||||
|
|
||||||
|
def __check_flags(self, meth, flags):
    # The SSL layer cannot honor MSG_* flags, so any non-zero value
    # would be silently wrong; reject it loudly instead.
    if flags != 0:
        raise ValueError(
            "non-zero flags not allowed in calls to %s on %s" %
            (meth, self.__class__))
|
||||||
|
|
||||||
|
def send(self, data, flags=0, timeout=timeout_default):
    """Send DATA over the SSL channel (or the plain socket if this
    socket is unwrapped); return the number of bytes written."""
    self._checkClosed()
    self.__check_flags('send', flags)

    if timeout is timeout_default:
        timeout = self.timeout

    if not self._sslobj:
        # Unwrapped: delegate to the plain gevent socket implementation.
        return socket.send(self, data, flags, timeout)

    while True:
        try:
            return self._sslobj.write(data)
        except SSLWantReadError:
            if self.timeout == 0.0:
                # Non-blocking: report "nothing sent" rather than raising.
                return 0
            self._wait(self._read_event)
        except SSLWantWriteError:
            if self.timeout == 0.0:
                return 0
            self._wait(self._write_event)
|
||||||
|
|
||||||
|
def sendto(self, data, flags_or_addr, addr=None):
    """sendto is only valid on an unwrapped (plain) socket."""
    self._checkClosed()
    if self._sslobj:
        raise ValueError("sendto not allowed on instances of %s" %
                         self.__class__)
    # Plain socket: forward with whichever argument shape we were given.
    if addr is None:
        return socket.sendto(self, data, flags_or_addr)
    return socket.sendto(self, data, flags_or_addr, addr)
|
||||||
|
|
||||||
|
def sendmsg(self, *args, **kwargs):
    """Always refuse: sendmsg would bypass encryption entirely."""
    # Ensure programs don't send data unencrypted if they try to
    # use this method.
    msg = "sendmsg not allowed on instances of %s" % self.__class__
    raise NotImplementedError(msg)
|
||||||
|
|
||||||
|
def sendall(self, data, flags=0):
    """Send all of DATA, retrying as needed; flags must be zero."""
    self._checkClosed()
    self.__check_flags('sendall', flags)

    try:
        socket.sendall(self, data)
    except _socket_timeout as ex:
        if self.timeout == 0.0:
            # Python 2 simply *hangs* in this case, which is bad, but
            # Python 3 raises SSLWantWriteError. We do the same.
            raise SSLWantWriteError("The operation did not complete (write)")
        # Convert the socket.timeout back to the sslerror
        raise SSLError(*ex.args)
|
||||||
|
|
||||||
|
def recv(self, buflen=1024, flags=0):
    """Receive up to BUFLEN bytes; delegates to read() when SSL-wrapped."""
    self._checkClosed()
    if self._sslobj:
        if flags != 0:
            # MSG_* flags can't pass through the SSL layer.
            raise ValueError(
                "non-zero flags not allowed in calls to recv() on %s" %
                self.__class__)
        if buflen == 0:
            # Short-circuit a zero-length read without touching OpenSSL.
            return b''
        return self.read(buflen)
    else:
        return socket.recv(self, buflen, flags)
|
||||||
|
|
||||||
|
def recv_into(self, buffer, nbytes=None, flags=0):
    """Receive into BUFFER; returns the number of bytes read."""
    self._checkClosed()
    if buffer is not None and (nbytes is None):
        # Fix for python bug #23804: bool(bytearray()) is False,
        # but we should read 0 bytes.
        nbytes = len(buffer)
    elif nbytes is None:
        nbytes = 1024
    if self._sslobj:
        if flags != 0:
            raise ValueError(
                "non-zero flags not allowed in calls to recv_into() on %s" %
                self.__class__)
        return self.read(nbytes, buffer)
    else:
        return socket.recv_into(self, buffer, nbytes, flags)
|
||||||
|
|
||||||
|
def recvfrom(self, buflen=1024, flags=0):
    """recvfrom is only valid on an unwrapped (plain) socket."""
    self._checkClosed()
    if self._sslobj:
        raise ValueError("recvfrom not allowed on instances of %s" %
                         self.__class__)
    return socket.recvfrom(self, buflen, flags)
|
||||||
|
|
||||||
|
def recvfrom_into(self, buffer, nbytes=None, flags=0):
    """recvfrom_into is only valid on an unwrapped (plain) socket."""
    self._checkClosed()
    if self._sslobj:
        raise ValueError("recvfrom_into not allowed on instances of %s" %
                         self.__class__)
    return socket.recvfrom_into(self, buffer, nbytes, flags)
|
||||||
|
|
||||||
|
def recvmsg(self, *args, **kwargs):
    """Always refuse: recvmsg would bypass the SSL layer."""
    msg = "recvmsg not allowed on instances of %s" % self.__class__
    raise NotImplementedError(msg)
|
||||||
|
|
||||||
|
def recvmsg_into(self, *args, **kwargs):
    """Always refuse: recvmsg_into would bypass the SSL layer."""
    msg = "recvmsg_into not allowed on instances of " "%s" % self.__class__
    raise NotImplementedError(msg)
|
||||||
|
|
||||||
|
def pending(self):
    """Number of decrypted bytes already buffered inside OpenSSL."""
    self._checkClosed()
    sslobj = self._sslobj
    return sslobj.pending() if sslobj else 0
|
||||||
|
|
||||||
|
def shutdown(self, how):
    """Shut down one or both halves of the connection; the SSL state
    is dropped first so no further SSL I/O is attempted."""
    self._checkClosed()
    self._sslobj = None
    socket.shutdown(self, how)
|
||||||
|
|
||||||
|
def close(self):
    """Close the socket, unless makefile() objects still reference it.

    Each outstanding file object holds one reference; the real close
    happens only when the count drops below one.
    """
    if self._makefile_refs >= 1:
        self._makefile_refs -= 1
        return
    self._sslobj = None
    socket.close(self)
|
||||||
|
|
||||||
|
if PYPY:

    def _reuse(self):
        # PyPy resource-tracking hook: another object now shares this fd.
        self._makefile_refs += 1

    def _drop(self):
        # PyPy resource-tracking hook: release one shared reference,
        # closing for real once the last one is gone.
        if self._makefile_refs < 1:
            self.close()
        else:
            self._makefile_refs -= 1
|
||||||
|
|
||||||
|
def _sslobj_shutdown(self):
    # Drive the SSL shutdown handshake to completion, cooperatively
    # waiting on the hub for WANT_READ/WANT_WRITE.  Returns the raw
    # _socket.socket from OpenSSL.  (sys.exc_clear: Python 2 only.)
    while True:
        try:
            return self._sslobj.shutdown()
        except SSLError as ex:
            if ex.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
                # Peer vanished without a clean shutdown; treat as done.
                return ''
            elif ex.args[0] == SSL_ERROR_WANT_READ:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()
                self._wait(self._read_event, timeout_exc=_SSLErrorReadTimeout)
            elif ex.args[0] == SSL_ERROR_WANT_WRITE:
                if self.timeout == 0.0:
                    raise
                sys.exc_clear()
                self._wait(self._write_event, timeout_exc=_SSLErrorWriteTimeout)
            else:
                raise
|
||||||
|
|
||||||
|
def unwrap(self):
    """Complete the SSL shutdown and return the underlying plain socket."""
    if self._sslobj:
        s = self._sslobj_shutdown()
        self._sslobj = None
        return socket(_sock=s) # match _ssl2; critical to drop/reuse here on PyPy
    else:
        raise ValueError("No SSL wrapper around " + str(self))
|
||||||
|
|
||||||
|
def _real_close(self):
    # Drop the SSL state, then perform the base socket's real close.
    self._sslobj = None
    socket._real_close(self) # pylint: disable=no-member
|
||||||
|
|
||||||
|
def do_handshake(self):
    """Perform a TLS/SSL handshake."""
    self._check_connected()
    # Loop until the handshake completes, yielding to the hub on
    # WANT_READ/WANT_WRITE instead of blocking the whole process.
    while True:
        try:
            self._sslobj.do_handshake()
            break
        except SSLWantReadError:
            if self.timeout == 0.0:
                raise
            self._wait(self._read_event, timeout_exc=_SSLErrorHandshakeTimeout)
        except SSLWantWriteError:
            if self.timeout == 0.0:
                raise
            self._wait(self._write_event, timeout_exc=_SSLErrorHandshakeTimeout)

    # Post-handshake hostname verification, mirroring the stdlib.
    if self._context.check_hostname:
        if not self.server_hostname:
            raise ValueError("check_hostname needs server_hostname "
                             "argument")
        match_hostname(self.getpeercert(), self.server_hostname)
|
||||||
|
|
||||||
|
def _real_connect(self, addr, connect_ex):
    # Shared implementation behind connect()/connect_ex().
    if self.server_side:
        raise ValueError("can't connect in server-side mode")
    # Here we assume that the socket is client-side, and not
    # connected at the time of the call. We connect it, then wrap it.
    if self._connected:
        raise ValueError("attempt to connect already-connected SSLSocket!")
    self._sslobj = self._context._wrap_socket(self._sock, False, self.server_hostname, ssl_sock=self)
    try:
        if connect_ex:
            # connect_ex reports failure via a return code, not an exception.
            rc = socket.connect_ex(self, addr)
        else:
            rc = None
            socket.connect(self, addr)
        if not rc:
            self._connected = True
            if self.do_handshake_on_connect:
                self.do_handshake()
        return rc
    except socket_error:
        # Connection failed: drop the half-built SSL state.
        self._sslobj = None
        raise
|
||||||
|
|
||||||
|
def connect(self, addr):
    """Connects to remote ADDR, and then wraps the connection in
    an SSL channel."""
    self._real_connect(addr, False)
|
||||||
|
|
||||||
|
def connect_ex(self, addr):
    """Connects to remote ADDR, and then wraps the connection in
    an SSL channel.  Returns an error code instead of raising."""
    return self._real_connect(addr, True)
|
||||||
|
|
||||||
|
def accept(self):
    """Accepts a new connection from a remote client, and returns
    a tuple containing that new connection wrapped with a server-side
    SSL channel, and the address of the remote client."""

    newsock, addr = socket.accept(self)
    # Inherit this listener's handshake/EOF policy for the new socket.
    newsock = self._context.wrap_socket(newsock,
                                        do_handshake_on_connect=self.do_handshake_on_connect,
                                        suppress_ragged_eofs=self.suppress_ragged_eofs,
                                        server_side=True)
    return newsock, addr
|
||||||
|
|
||||||
|
def makefile(self, mode='r', bufsize=-1):
    """Make and return a file-like object that
    works with the SSL connection. Just use the code
    from the socket module."""
    if not PYPY:
        # CPython: count the extra reference so close() defers the real
        # teardown until the last file object is finished.
        self._makefile_refs += 1
    # close=True so as to decrement the reference count when done with
    # the file-like object.
    return _fileobject(self, mode, bufsize, close=True)
|
||||||
|
|
||||||
|
def get_channel_binding(self, cb_type="tls-unique"):
    """Get channel binding data for current connection. Raise ValueError
    if the requested `cb_type` is not supported. Return bytes of the data
    or None if the data is not available (e.g. before the handshake).
    """
    if cb_type not in CHANNEL_BINDING_TYPES:
        raise ValueError("Unsupported channel binding type")
    if cb_type != "tls-unique":
        # Only tls-unique is implemented by the underlying _ssl module.
        raise NotImplementedError(
            "{0} channel binding type not implemented"
            .format(cb_type))
    if self._sslobj is None:
        return None
    return self._sslobj.tls_unique_cb()
|
||||||
|
|
||||||
|
def version(self):
    """Return a string naming the protocol of the established SSL
    channel, or None when no channel exists."""
    sslobj = self._sslobj
    return None if sslobj is None else sslobj.version()
|
||||||
|
|
||||||
|
if PYPY or not hasattr(SSLSocket, 'timeout'):
    # PyPy (and certain versions of CPython) doesn't have a direct
    # 'timeout' property on raw sockets, because that's not part of
    # the documented specification. We may wind up wrapping a raw
    # socket (when ssl is used with PyWSGI) or a gevent socket, which
    # does have a read/write timeout property as an alias for
    # get/settimeout, so make sure that's always the case because
    # pywsgi can depend on that.
    SSLSocket.timeout = property(lambda self: self.gettimeout(),
                                 lambda self, value: self.settimeout(value))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Pre-built exceptions handed to _wait() as timeout_exc, matching the
# stdlib's ssl timeout message texts.
_SSLErrorReadTimeout = SSLError('The read operation timed out')
_SSLErrorWriteTimeout = SSLError('The write operation timed out')
_SSLErrorHandshakeTimeout = SSLError('The handshake operation timed out')
|
||||||
|
|
||||||
|
def wrap_socket(sock, keyfile=None, certfile=None,
                server_side=False, cert_reqs=CERT_NONE,
                ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True,
                ciphers=None):
    """Wrap an existing socket in a (cooperative) SSLSocket, mirroring
    the stdlib ``ssl.wrap_socket`` signature."""
    return SSLSocket(sock=sock, keyfile=keyfile, certfile=certfile,
                     server_side=server_side, cert_reqs=cert_reqs,
                     ssl_version=ssl_version, ca_certs=ca_certs,
                     do_handshake_on_connect=do_handshake_on_connect,
                     suppress_ragged_eofs=suppress_ragged_eofs,
                     ciphers=ciphers)
|
||||||
|
|
||||||
|
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
    """Retrieve the certificate from the server at the specified address,
    and return it as a PEM-encoded string.
    If 'ca_certs' is specified, validate the server cert against it.
    If 'ssl_version' is specified, use it in the connection attempt."""

    # Fail fast if addr is not a (host, port) pair.
    _, _ = addr
    if ca_certs is not None:
        cert_reqs = CERT_REQUIRED
    else:
        cert_reqs = CERT_NONE
    context = _create_stdlib_context(ssl_version,
                                     cert_reqs=cert_reqs,
                                     cafile=ca_certs)
    with closing(create_connection(addr)) as sock:
        with closing(context.wrap_socket(sock)) as sslsock:
            dercert = sslsock.getpeercert(True)
    return DER_cert_to_PEM_cert(dercert)
|
||||||
431
python/gevent/_tblib.py
Normal file
431
python/gevent/_tblib.py
Normal file
@@ -0,0 +1,431 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
# A vendored version of part of https://github.com/ionelmc/python-tblib
|
||||||
|
# pylint:disable=redefined-outer-name,reimported,function-redefined,bare-except,no-else-return,broad-except
|
||||||
|
####
|
||||||
|
# Copyright (c) 2013-2016, Ionel Cristian Mărieș
|
||||||
|
# All rights reserved.
|
||||||
|
|
||||||
|
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
|
||||||
|
# following conditions are met:
|
||||||
|
|
||||||
|
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
|
||||||
|
# disclaimer.
|
||||||
|
|
||||||
|
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
|
||||||
|
# disclaimer in the documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
|
||||||
|
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||||
|
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||||
|
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||||
|
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
####
|
||||||
|
|
||||||
|
# cpython.py
|
||||||
|
|
||||||
|
"""
|
||||||
|
Taken verbatim from Jinja2.
|
||||||
|
|
||||||
|
https://github.com/mitsuhiko/jinja2/blob/master/jinja2/debug.py#L267
|
||||||
|
"""
|
||||||
|
#import platform # XXX: gevent cannot import platform at the top level; interferes with monkey patching
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
def _init_ugly_crap():
    """This function implements a few ugly things so that we can patch the
    traceback objects. The function returned allows resetting `tb_next` on
    any python traceback object. Do not attempt to use this on non cpython
    interpreters
    """
    import ctypes
    from types import TracebackType

    # figure out side of _Py_ssize_t
    # NOTE(review): detecting 64-bit by probing for Py_InitModule4_64 is a
    # CPython-2 era trick inherited from Jinja2.
    if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
        _Py_ssize_t = ctypes.c_int64
    else:
        _Py_ssize_t = ctypes.c_int

    # regular python
    class _PyObject(ctypes.Structure):
        pass
    _PyObject._fields_ = [
        ('ob_refcnt', _Py_ssize_t),
        ('ob_type', ctypes.POINTER(_PyObject))
    ]

    # python with trace (--with-pydebug builds prepend the _ob_* links)
    if hasattr(sys, 'getobjects'):
        class _PyObject(ctypes.Structure):
            pass
        _PyObject._fields_ = [
            ('_ob_next', ctypes.POINTER(_PyObject)),
            ('_ob_prev', ctypes.POINTER(_PyObject)),
            ('ob_refcnt', _Py_ssize_t),
            ('ob_type', ctypes.POINTER(_PyObject))
        ]

    # Mirror of CPython's PyTracebackObject layout; must match exactly.
    class _Traceback(_PyObject):
        pass
    _Traceback._fields_ = [
        ('tb_next', ctypes.POINTER(_Traceback)),
        ('tb_frame', ctypes.POINTER(_PyObject)),
        ('tb_lasti', ctypes.c_int),
        ('tb_lineno', ctypes.c_int)
    ]

    def tb_set_next(tb, next):
        """Set the tb_next attribute of a traceback object."""
        if not (isinstance(tb, TracebackType) and (next is None or isinstance(next, TracebackType))):
            raise TypeError('tb_set_next arguments must be traceback objects')
        obj = _Traceback.from_address(id(tb))
        if tb.tb_next is not None:
            # Manually balance refcounts since we bypass the interpreter.
            old = _Traceback.from_address(id(tb.tb_next))
            old.ob_refcnt -= 1
        if next is None:
            obj.tb_next = ctypes.POINTER(_Traceback)()
        else:
            next = _Traceback.from_address(id(next))
            next.ob_refcnt += 1
            obj.tb_next = ctypes.pointer(next)

    return tb_set_next
|
||||||
|
|
||||||
|
|
||||||
|
# Deferred: _init() assigns the real implementation at runtime (importing
# platform at module load interferes with gevent's monkey patching).
tb_set_next = None
#try:
#    if platform.python_implementation() == 'CPython':
#        tb_set_next = _init_ugly_crap()
#except Exception as exc:
#    sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))
#del _init_ugly_crap
|
||||||
|
|
||||||
|
# __init__.py
|
||||||
|
import re
|
||||||
|
from types import CodeType
|
||||||
|
from types import TracebackType
|
||||||
|
|
||||||
|
try:
|
||||||
|
from __pypy__ import tproxy
|
||||||
|
except ImportError:
|
||||||
|
tproxy = None
|
||||||
|
|
||||||
|
__version__ = '1.3.0'
|
||||||
|
__all__ = ('Traceback',)
|
||||||
|
|
||||||
|
PY3 = sys.version_info[0] == 3
|
||||||
|
FRAME_RE = re.compile(r'^\s*File "(?P<co_filename>.+)", line (?P<tb_lineno>\d+)(, in (?P<co_name>.+))?$')
|
||||||
|
|
||||||
|
|
||||||
|
class _AttrDict(dict):
    """Dictionary whose keys are also readable as attributes.

    Missing attributes raise KeyError (same as the original
    ``__getattr__ = dict.__getitem__`` binding).
    """
    __slots__ = ()

    def __getattr__(self, name):
        return self[name]
|
||||||
|
|
||||||
|
|
||||||
|
# noinspection PyPep8Naming
|
||||||
|
# noinspection PyPep8Naming
class __traceback_maker(Exception):
    # Sentinel exception raised by synthesized code objects in
    # Traceback.as_traceback() to manufacture real traceback frames.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class TracebackParseError(Exception):
    # Raised by Traceback.from_string when no frames can be recognized.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class Code(object):
    """Picklable stand-in for a code object, carrying only the co_*
    attributes that traceback re-creation needs."""

    def __init__(self, code):
        for attr in ('co_filename', 'co_name',
                     # gevent: copy more attributes
                     'co_nlocals', 'co_stacksize', 'co_flags',
                     'co_firstlineno'):
            setattr(self, attr, getattr(code, attr))
|
||||||
|
|
||||||
|
|
||||||
|
class Frame(object):
    """Picklable stand-in for a frame object: keeps only __file__/__name__
    from the globals plus a trimmed Code for f_code."""

    def __init__(self, frame):
        self.f_globals = dict([
            (k, v)
            for k, v in frame.f_globals.items()
            if k in ("__file__", "__name__")
        ])
        self.f_code = Code(frame.f_code)

    def clear(self):
        # For compatibility with PyPy 3.5;
        # clear was added to frame in Python 3.4
        # and is called by traceback.clear_frames(), which
        # in turn is called by unittest.TestCase.assertRaises
        pass
|
||||||
|
|
||||||
|
class Traceback(object):
    """Picklable/serializable representation of a traceback chain that
    can be turned back into a real traceback object."""

    tb_next = None

    def __init__(self, tb):
        self.tb_frame = Frame(tb.tb_frame)
        # noinspection SpellCheckingInspection
        self.tb_lineno = int(tb.tb_lineno)

        # Build in place to avoid exceeding the recursion limit
        tb = tb.tb_next
        prev_traceback = self
        cls = type(self)
        while tb is not None:
            traceback = object.__new__(cls)
            traceback.tb_frame = Frame(tb.tb_frame)
            traceback.tb_lineno = int(tb.tb_lineno)
            prev_traceback.tb_next = traceback
            prev_traceback = traceback
            tb = tb.tb_next

    def as_traceback(self):
        # Re-create a real traceback: on PyPy via a transparent proxy,
        # on CPython by raising inside synthesized code objects and
        # stitching the resulting frames together with tb_set_next.
        if tproxy:
            return tproxy(TracebackType, self.__tproxy_handler)
        if not tb_set_next:
            raise RuntimeError("Cannot re-create traceback !")

        current = self
        top_tb = None
        tb = None
        while current:
            f_code = current.tb_frame.f_code
            # Pad with newlines so the raise lands on the recorded line.
            code = compile('\n' * (current.tb_lineno - 1) + 'raise __traceback_maker', current.tb_frame.f_code.co_filename, 'exec')
            if PY3:
                code = CodeType(
                    0, code.co_kwonlyargcount,
                    code.co_nlocals, code.co_stacksize, code.co_flags,
                    code.co_code, code.co_consts, code.co_names, code.co_varnames,
                    f_code.co_filename, f_code.co_name,
                    code.co_firstlineno, code.co_lnotab, (), ()
                )
            else:
                code = CodeType(
                    0,
                    code.co_nlocals, code.co_stacksize, code.co_flags,
                    code.co_code, code.co_consts, code.co_names, code.co_varnames,
                    f_code.co_filename.encode(), f_code.co_name.encode(),
                    code.co_firstlineno, code.co_lnotab, (), ()
                )

            # noinspection PyBroadException
            try:
                exec(code, current.tb_frame.f_globals, {})
            except:
                next_tb = sys.exc_info()[2].tb_next
                if top_tb is None:
                    top_tb = next_tb
                if tb is not None:
                    tb_set_next(tb, next_tb)
                tb = next_tb
                del next_tb

            current = current.tb_next
        try:
            return top_tb
        finally:
            # Break local references so frames can be collected promptly.
            del top_tb
            del tb

    # noinspection SpellCheckingInspection
    def __tproxy_handler(self, operation, *args, **kwargs):
        # PyPy tproxy callback: lazily materialize tb_next, forward
        # everything else to this object.
        if operation in ('__getattribute__', '__getattr__'):
            if args[0] == 'tb_next':
                return self.tb_next and self.tb_next.as_traceback()
            else:
                return getattr(self, args[0])
        else:
            return getattr(self, operation)(*args, **kwargs)

    def to_dict(self):
        """Convert a Traceback into a dictionary representation"""
        if self.tb_next is None:
            tb_next = None
        else:
            tb_next = self.tb_next.to_dict()

        code = {
            'co_filename': self.tb_frame.f_code.co_filename,
            'co_name': self.tb_frame.f_code.co_name,
        }
        frame = {
            'f_globals': self.tb_frame.f_globals,
            'f_code': code,
        }
        return {
            'tb_frame': frame,
            'tb_lineno': self.tb_lineno,
            'tb_next': tb_next,
        }

    @classmethod
    def from_dict(cls, dct):
        """Rebuild a Traceback from the dict produced by to_dict()."""
        if dct['tb_next']:
            tb_next = cls.from_dict(dct['tb_next'])
        else:
            tb_next = None

        code = _AttrDict(
            co_filename=dct['tb_frame']['f_code']['co_filename'],
            co_name=dct['tb_frame']['f_code']['co_name'],
        )
        frame = _AttrDict(
            f_globals=dct['tb_frame']['f_globals'],
            f_code=code,
        )
        tb = _AttrDict(
            tb_frame=frame,
            tb_lineno=dct['tb_lineno'],
            tb_next=tb_next,
        )
        return cls(tb)

    @classmethod
    def from_string(cls, string, strict=True):
        """Parse a formatted traceback string back into a Traceback."""
        frames = []
        header = strict

        for line in string.splitlines():
            line = line.rstrip()
            if header:
                # In strict mode, skip everything before the banner line.
                if line == 'Traceback (most recent call last):':
                    header = False
                continue
            frame_match = FRAME_RE.match(line)
            if frame_match:
                frames.append(frame_match.groupdict())
            elif line.startswith('  '):
                # Source-line echo under a frame header; ignore.
                pass
            elif strict:
                break # traceback ended

        if frames:
            # Link the parsed frames innermost-first via duck-typed dicts.
            previous = None
            for frame in reversed(frames):
                previous = _AttrDict(
                    frame,
                    tb_frame=_AttrDict(
                        frame,
                        f_globals=_AttrDict(
                            __file__=frame['co_filename'],
                            __name__='?',
                        ),
                        f_code=_AttrDict(frame),
                    ),
                    tb_next=previous,
                )
            return cls(previous)
        else:
            raise TracebackParseError("Could not find any frames in %r." % string)
|
||||||
|
|
||||||
|
# pickling_support.py
|
||||||
|
|
||||||
|
|
||||||
|
def unpickle_traceback(tb_frame, tb_lineno, tb_next):
    # Pickle reconstructor: rebuild a Traceback without running __init__
    # and immediately convert it to a real traceback object.
    ret = object.__new__(Traceback)
    ret.tb_frame = tb_frame
    ret.tb_lineno = tb_lineno
    ret.tb_next = tb_next
    return ret.as_traceback()
|
||||||
|
|
||||||
|
|
||||||
|
def pickle_traceback(tb):
    # copy_reg reducer: serialize a real traceback as picklable parts.
    return unpickle_traceback, (Frame(tb.tb_frame), tb.tb_lineno, tb.tb_next and Traceback(tb.tb_next))
|
||||||
|
|
||||||
|
|
||||||
|
def install():
    """Register pickle support for real traceback objects."""
    try:
        import copy_reg          # Python 2
    except ImportError:
        import copyreg as copy_reg   # Python 3

    copy_reg.pickle(TracebackType, pickle_traceback)
|
||||||
|
|
||||||
|
# Added by gevent
|
||||||
|
|
||||||
|
# We have to defer the initialization, and especially the import of platform,
|
||||||
|
# until runtime. If we're monkey patched, we need to be sure to use
|
||||||
|
# the original __import__ to avoid switching through the hub due to
|
||||||
|
# import locks on Python 2. See also builtins.py for details.
|
||||||
|
|
||||||
|
|
||||||
|
def _unlocked_imports(f):
    """Decorator: run *f* with gevent's import locks released, so pickle's
    internal imports cannot deadlock/switch through the hub on Python 2."""
    def g(a):
        if sys is None: # pragma: no cover
            # interpreter shutdown on Py2
            return

        gb = None
        if 'gevent.builtins' in sys.modules:
            gb = sys.modules['gevent.builtins']
            gb._unlock_imports()
        try:
            return f(a)
        finally:
            if gb is not None:
                gb._lock_imports()
    # Preserve identity for introspection (functools.wraps-lite).
    g.__name__ = f.__name__
    g.__module__ = f.__module__
    return g
|
||||||
|
|
||||||
|
|
||||||
|
def _import_dump_load():
    # Lazily bind the module-level dumps/loads to the fastest available
    # pickle implementation (cPickle on Python 2).
    global dumps
    global loads
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    dumps = pickle.dumps
    loads = pickle.loads
|
||||||
|
|
||||||
|
# Bound by _import_dump_load() on first use.
dumps = loads = None

# One-shot guard for _init().
_installed = False
|
||||||
|
|
||||||
|
|
||||||
|
def _init():
    """One-time runtime initialization: pick a tb_next-setting strategy
    (ctypes on CPython, tproxy on PyPy), import pickle, and register
    the traceback reducer.  Deferred so `platform` is imported only at
    runtime (importing it at module load interferes with monkey patching)."""
    global _installed
    global tb_set_next
    if _installed:
        return

    _installed = True
    import platform
    try:
        if platform.python_implementation() == 'CPython':
            tb_set_next = _init_ugly_crap()
    except Exception as exc:
        # Best effort: fall through and try tproxy below.
        sys.stderr.write("Failed to initialize cpython support: {!r}".format(exc))

    try:
        from __pypy__ import tproxy
    except ImportError:
        tproxy = None

    if not tb_set_next and not tproxy:
        raise ImportError("Cannot use tblib. Runtime not supported.")
    _import_dump_load()
    install()
|
||||||
|
|
||||||
|
|
||||||
|
@_unlocked_imports
def dump_traceback(tb):
    """Pickle a real traceback object to bytes."""
    # Both _init and dump/load have to be unlocked, because
    # copy_reg and pickle can do imports to resolve class names; those
    # class names are in this module and greenlet safe though
    _init()
    return dumps(tb)
|
||||||
|
|
||||||
|
|
||||||
|
@_unlocked_imports
def load_traceback(s):
    """Unpickle a traceback previously produced by :func:`dump_traceback`."""
    _init()
    return loads(s)
|
||||||
515
python/gevent/_threading.py
Normal file
515
python/gevent/_threading.py
Normal file
@@ -0,0 +1,515 @@
|
|||||||
|
"""A clone of threading module (version 2.7.2) that always
|
||||||
|
targets real OS threads. (Unlike 'threading' which flips between
|
||||||
|
green and OS threads based on whether the monkey patching is in effect
|
||||||
|
or not).
|
||||||
|
|
||||||
|
This module is missing 'Thread' class, but includes 'Queue'.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
try:
|
||||||
|
from Queue import Full, Empty
|
||||||
|
except ImportError:
|
||||||
|
from queue import Full, Empty # pylint:disable=import-error
|
||||||
|
from collections import deque
|
||||||
|
import heapq
|
||||||
|
from time import time as _time, sleep as _sleep
|
||||||
|
|
||||||
|
from gevent import monkey
|
||||||
|
from gevent._compat import PY3
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['Condition',
|
||||||
|
'Event',
|
||||||
|
'Lock',
|
||||||
|
'RLock',
|
||||||
|
'Semaphore',
|
||||||
|
'BoundedSemaphore',
|
||||||
|
'Queue',
|
||||||
|
'local',
|
||||||
|
'stack_size']
|
||||||
|
|
||||||
|
|
||||||
|
thread_name = '_thread' if PY3 else 'thread'
|
||||||
|
start_new_thread, Lock, get_ident, local, stack_size = monkey.get_original(thread_name, [
|
||||||
|
'start_new_thread', 'allocate_lock', 'get_ident', '_local', 'stack_size'])
|
||||||
|
|
||||||
|
|
||||||
|
class RLock(object):
    """A reentrant lock built on a raw OS-thread Lock.

    The same thread may acquire it multiple times; other threads only
    get in after the matching number of release() calls.
    """

    def __init__(self):
        # The underlying non-reentrant OS lock.
        self.__block = Lock()
        # Thread id of the current owner, or None when unowned.
        self.__owner = None
        # Recursion depth for the owning thread.
        self.__count = 0

    def __repr__(self):
        owner = self.__owner
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self.__count)

    def acquire(self, blocking=1):
        """Acquire the lock, recursively if this thread already holds it.

        Returns a true value on success; with ``blocking=0`` may return
        a false value when another thread holds the lock.
        """
        me = get_ident()
        if self.__owner == me:
            # Re-entrant acquire: just bump the recursion count.
            self.__count = self.__count + 1
            return 1
        rc = self.__block.acquire(blocking)
        if rc:
            self.__owner = me
            self.__count = 1
        return rc

    __enter__ = acquire

    def release(self):
        """Release one level of the lock; only the owning thread may call."""
        if self.__owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self.__count = count = self.__count - 1
        if not count:
            # Outermost release: hand the underlying lock back.
            self.__owner = None
            self.__block.release()

    def __exit__(self, t, v, tb):
        self.release()

    # Internal methods used by condition variables

    def _acquire_restore(self, count_owner):
        # Re-acquire and restore the (count, owner) state previously
        # saved by _release_save(); used by Condition.wait().
        count, owner = count_owner
        self.__block.acquire()
        self.__count = count
        self.__owner = owner

    def _release_save(self):
        # Fully release the lock regardless of recursion depth and
        # return the state needed to restore it later.
        count = self.__count
        self.__count = 0
        owner = self.__owner
        self.__owner = None
        self.__block.release()
        return (count, owner)

    def _is_owned(self):
        # True if the calling thread currently holds the lock.
        return self.__owner == get_ident()
|
||||||
|
|
||||||
|
class Condition(object):
    """Condition variable layered over an arbitrary lock (RLock by default)."""
    # pylint:disable=method-hidden

    def __init__(self, lock=None):
        if lock is None:
            lock = RLock()
        self.__lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock). Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        # One raw Lock per waiting thread; releasing it wakes that waiter.
        self.__waiters = []

    def __enter__(self):
        return self.__lock.__enter__()

    def __exit__(self, *args):
        return self.__lock.__exit__(*args)

    def __repr__(self):
        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))

    def _release_save(self):
        self.__lock.release() # No state to save

    def _acquire_restore(self, x): # pylint:disable=unused-argument
        self.__lock.acquire() # Ignore saved state

    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if __lock doesn't have _is_owned().
        if self.__lock.acquire(0):
            self.__lock.release()
            return False
        return True

    def wait(self, timeout=None):
        """Wait until notified or *timeout* elapses; caller must hold the lock."""
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        # Each waiter blocks on its own freshly-acquired raw Lock.
        waiter = Lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try: # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
            else:
                # Balancing act: We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive. The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    # Timed out: withdraw from the waiter list (a racing
                    # notify() may have removed us already).
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
        finally:
            self._acquire_restore(saved_state)

    def notify(self, n=1):
        """Wake up to *n* waiting threads; caller must hold the lock."""
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self.__waiters
        waiters = __waiters[:n]
        if not waiters:
            return
        for waiter in waiters:
            waiter.release()
            # A timed-out wait() may race us removing the same waiter.
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass

    def notify_all(self):
        """Wake every thread currently waiting on this condition."""
        self.notify(len(self.__waiters))
|
||||||
|
|
||||||
|
class Semaphore(object):
    """Counting semaphore guarding a non-negative internal counter."""

    # After Tim Peters' semaphore class, but not quite the same (no maximum)

    def __init__(self, value=1):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        # A Condition over a raw Lock serializes access to the counter.
        self.__cond = Condition(Lock())
        self.__value = value

    def acquire(self, blocking=1):
        """Decrement the counter, blocking while it is zero (if *blocking*).

        Returns True on success, False when non-blocking and the counter
        was zero.
        """
        rc = False
        self.__cond.acquire()
        # while/else: the else branch runs only when we exited the loop
        # because the counter became non-zero (not via the break).
        while self.__value == 0:
            if not blocking:
                break
            self.__cond.wait()
        else:
            self.__value = self.__value - 1
            rc = True
        self.__cond.release()
        return rc

    __enter__ = acquire

    def release(self):
        """Increment the counter and wake one thread blocked in acquire()."""
        self.__cond.acquire()
        self.__value = self.__value + 1
        self.__cond.notify()
        self.__cond.release()

    def __exit__(self, t, v, tb):
        self.release()
|
||||||
|
|
||||||
|
class BoundedSemaphore(Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""

    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        # Remember the starting value so release() can detect overflow.
        self._initial_value = value

    def release(self):
        """Release the semaphore, refusing to exceed the initial value.

        Raises:
            ValueError: when called more times than acquire(), i.e. the
                counter would rise above its initial value.
        """
        # BUG FIX: the base class's private counter ``self.__value`` is
        # name-mangled to ``_Semaphore__value`` (leading underscore); the
        # previous spelling ``self.Semaphore__value`` raised AttributeError.
        if self._Semaphore__value >= self._initial_value: # pylint:disable=no-member
            raise ValueError("Semaphore released too many times")
        return Semaphore.release(self)
|
||||||
|
|
||||||
|
|
||||||
|
class Event(object):
    """A boolean flag that threads can wait on until it is set."""

    # After Tim Peters' event class (without is_posted())

    def __init__(self):
        # Condition over a raw Lock guards the flag.
        self.__cond = Condition(Lock())
        self.__flag = False

    def _reset_internal_locks(self):
        # private! called by Thread._reset_internal_locks by _after_fork()
        self.__cond.__init__()

    def is_set(self):
        """Return True if the internal flag is set."""
        return self.__flag

    def set(self):
        """Set the flag and wake all threads waiting on it."""
        self.__cond.acquire()
        try:
            self.__flag = True
            self.__cond.notify_all()
        finally:
            self.__cond.release()

    def clear(self):
        """Reset the flag so subsequent wait() calls block again."""
        self.__cond.acquire()
        try:
            self.__flag = False
        finally:
            self.__cond.release()

    def wait(self, timeout=None):
        """Block until the flag is set or *timeout* elapses.

        Returns the flag's value on exit, so False indicates a timeout.
        """
        self.__cond.acquire()
        try:
            if not self.__flag:
                self.__cond.wait(timeout)
            return self.__flag
        finally:
            self.__cond.release()
|
||||||
|
|
||||||
|
|
||||||
|
class Queue: # pylint:disable=old-style-class
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating. All methods
        # that acquire mutex must release it before returning. mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads. For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        try:
            return self._qsize()
        finally:
            self.mutex.release()

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        try:
            return not self._qsize()
        finally:
            self.mutex.release()

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        try:
            # BUG FIX: this used to test ``self.maxsize >= self._qsize()``,
            # which reported a bounded queue as "full" precisely when it
            # was NOT over capacity (and fell through returning None
            # otherwise). Match stdlib Queue.full(): full only when the
            # queue is bounded and at (or beyond) capacity.
            return 0 < self.maxsize <= self._qsize()
        finally:
            self.mutex.release()

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    endtime = _time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        # pylint:disable=unused-argument
        self.queue = deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
|
||||||
|
|
||||||
|
|
||||||
|
class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    '''

    def _init(self, maxsize):
        # Backing store is a binary heap kept in a plain list.
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        # pylint:disable=arguments-differ
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        # pylint:disable=arguments-differ
        return heappop(self.queue)
|
||||||
|
|
||||||
|
|
||||||
|
class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''

    def _init(self, maxsize):
        # Plain list used as a stack.
        self.queue = []

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        # pop() from the end gives LIFO order.
        return self.queue.pop()
|
||||||
106
python/gevent/_util.py
Normal file
106
python/gevent/_util.py
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
internal gevent utilities, not for external use.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import print_function, absolute_import, division
|
||||||
|
|
||||||
|
from gevent._compat import iteritems
|
||||||
|
|
||||||
|
|
||||||
|
class _NONE(object):
|
||||||
|
"""
|
||||||
|
A special object you must never pass to any gevent API.
|
||||||
|
Used as a marker object for keyword arguments that cannot have the
|
||||||
|
builtin None (because that might be a valid value).
|
||||||
|
"""
|
||||||
|
__slots__ = ()
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return '<default value>'
|
||||||
|
|
||||||
|
_NONE = _NONE()
|
||||||
|
|
||||||
|
def copy_globals(source,
|
||||||
|
globs,
|
||||||
|
only_names=None,
|
||||||
|
ignore_missing_names=False,
|
||||||
|
names_to_ignore=(),
|
||||||
|
dunder_names_to_keep=('__implements__', '__all__', '__imports__'),
|
||||||
|
cleanup_globs=True):
|
||||||
|
"""
|
||||||
|
Copy attributes defined in `source.__dict__` to the dictionary in globs
|
||||||
|
(which should be the caller's globals()).
|
||||||
|
|
||||||
|
Names that start with `__` are ignored (unless they are in
|
||||||
|
*dunder_names_to_keep*). Anything found in *names_to_ignore* is
|
||||||
|
also ignored.
|
||||||
|
|
||||||
|
If *only_names* is given, only those attributes will be considered.
|
||||||
|
In this case, *ignore_missing_names* says whether or not to raise an AttributeError
|
||||||
|
if one of those names can't be found.
|
||||||
|
|
||||||
|
If cleanup_globs has a true value, then common things imported but not used
|
||||||
|
at runtime are removed, including this function.
|
||||||
|
|
||||||
|
Returns a list of the names copied
|
||||||
|
"""
|
||||||
|
if only_names:
|
||||||
|
if ignore_missing_names:
|
||||||
|
items = ((k, getattr(source, k, _NONE)) for k in only_names)
|
||||||
|
else:
|
||||||
|
items = ((k, getattr(source, k)) for k in only_names)
|
||||||
|
else:
|
||||||
|
items = iteritems(source.__dict__)
|
||||||
|
|
||||||
|
copied = []
|
||||||
|
for key, value in items:
|
||||||
|
if value is _NONE:
|
||||||
|
continue
|
||||||
|
if key in names_to_ignore:
|
||||||
|
continue
|
||||||
|
if key.startswith("__") and key not in dunder_names_to_keep:
|
||||||
|
continue
|
||||||
|
globs[key] = value
|
||||||
|
copied.append(key)
|
||||||
|
|
||||||
|
if cleanup_globs:
|
||||||
|
if 'copy_globals' in globs:
|
||||||
|
del globs['copy_globals']
|
||||||
|
|
||||||
|
return copied
|
||||||
|
|
||||||
|
class Lazy(object):
    """
    A non-data descriptor used just like @property. The
    difference is the function value is assigned to the instance
    dict the first time it is accessed and then the function is never
    called again.
    """
    def __init__(self, func):
        # Cache the function and its name; the computed value is stored
        # in the instance dict under the function's own name.
        self.data = (func, func.__name__)

    def __get__(self, inst, class_):
        if inst is None:
            # Accessed on the class: return the descriptor itself.
            return self

        func, name = self.data
        value = func(inst)
        # Being a non-data descriptor (no __set__), this instance-dict
        # entry now shadows us; later lookups never reach __get__ again.
        inst.__dict__[name] = value
        return value
|
||||||
|
|
||||||
|
class readproperty(object):
    """
    A non-data descriptor in the style of @property. The difference is
    that assigning to the attribute stores a plain value in the instance
    dict, which then shadows this descriptor, so the wrapped function is
    not called on that instance again.
    """

    def __init__(self, func):
        self.func = func

    def __get__(self, inst, class_):
        # Class-level access returns the descriptor itself, like property.
        if inst is None:
            return self
        # No caching here: each instance access re-invokes the function
        # until the attribute is explicitly assigned.
        return self.func(inst)
|
||||||
7
python/gevent/_util_py2.py
Normal file
7
python/gevent/_util_py2.py
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# this produces syntax error on Python3
|
||||||
|
|
||||||
|
__all__ = ['reraise']
|
||||||
|
|
||||||
|
|
||||||
|
def reraise(type, value, tb):
    # Three-argument raise: valid only on Python 2 (this module is a
    # deliberate SyntaxError on Python 3; see the comment at the top).
    raise type, value, tb
|
||||||
454
python/gevent/ares.pyx
Normal file
454
python/gevent/ares.pyx
Normal file
@@ -0,0 +1,454 @@
|
|||||||
|
# Copyright (c) 2011-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
cimport cares
|
||||||
|
import sys
|
||||||
|
from python cimport *
|
||||||
|
from _socket import gaierror
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['channel']
|
||||||
|
|
||||||
|
cdef object string_types
|
||||||
|
cdef object text_type
|
||||||
|
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
string_types = str,
|
||||||
|
text_type = str
|
||||||
|
else:
|
||||||
|
string_types = __builtins__.basestring,
|
||||||
|
text_type = __builtins__.unicode
|
||||||
|
|
||||||
|
TIMEOUT = 1
|
||||||
|
|
||||||
|
DEF EV_READ = 1
|
||||||
|
DEF EV_WRITE = 2
|
||||||
|
|
||||||
|
|
||||||
|
cdef extern from "dnshelper.c":
|
||||||
|
int AF_INET
|
||||||
|
int AF_INET6
|
||||||
|
|
||||||
|
struct hostent:
|
||||||
|
char* h_name
|
||||||
|
int h_addrtype
|
||||||
|
|
||||||
|
struct sockaddr_t "sockaddr":
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ares_channeldata:
|
||||||
|
pass
|
||||||
|
|
||||||
|
object parse_h_name(hostent*)
|
||||||
|
object parse_h_aliases(hostent*)
|
||||||
|
object parse_h_addr_list(hostent*)
|
||||||
|
void* create_object_from_hostent(void*)
|
||||||
|
|
||||||
|
# this imports _socket lazily
|
||||||
|
object PyUnicode_FromString(char*)
|
||||||
|
int PyTuple_Check(object)
|
||||||
|
int PyArg_ParseTuple(object, char*, ...) except 0
|
||||||
|
struct sockaddr_in6:
|
||||||
|
pass
|
||||||
|
int gevent_make_sockaddr(char* hostp, int port, int flowinfo, int scope_id, sockaddr_in6* sa6)
|
||||||
|
|
||||||
|
void* malloc(int)
|
||||||
|
void free(void*)
|
||||||
|
void memset(void*, int, int)
|
||||||
|
|
||||||
|
|
||||||
|
ARES_SUCCESS = cares.ARES_SUCCESS
|
||||||
|
ARES_ENODATA = cares.ARES_ENODATA
|
||||||
|
ARES_EFORMERR = cares.ARES_EFORMERR
|
||||||
|
ARES_ESERVFAIL = cares.ARES_ESERVFAIL
|
||||||
|
ARES_ENOTFOUND = cares.ARES_ENOTFOUND
|
||||||
|
ARES_ENOTIMP = cares.ARES_ENOTIMP
|
||||||
|
ARES_EREFUSED = cares.ARES_EREFUSED
|
||||||
|
ARES_EBADQUERY = cares.ARES_EBADQUERY
|
||||||
|
ARES_EBADNAME = cares.ARES_EBADNAME
|
||||||
|
ARES_EBADFAMILY = cares.ARES_EBADFAMILY
|
||||||
|
ARES_EBADRESP = cares.ARES_EBADRESP
|
||||||
|
ARES_ECONNREFUSED = cares.ARES_ECONNREFUSED
|
||||||
|
ARES_ETIMEOUT = cares.ARES_ETIMEOUT
|
||||||
|
ARES_EOF = cares.ARES_EOF
|
||||||
|
ARES_EFILE = cares.ARES_EFILE
|
||||||
|
ARES_ENOMEM = cares.ARES_ENOMEM
|
||||||
|
ARES_EDESTRUCTION = cares.ARES_EDESTRUCTION
|
||||||
|
ARES_EBADSTR = cares.ARES_EBADSTR
|
||||||
|
ARES_EBADFLAGS = cares.ARES_EBADFLAGS
|
||||||
|
ARES_ENONAME = cares.ARES_ENONAME
|
||||||
|
ARES_EBADHINTS = cares.ARES_EBADHINTS
|
||||||
|
ARES_ENOTINITIALIZED = cares.ARES_ENOTINITIALIZED
|
||||||
|
ARES_ELOADIPHLPAPI = cares.ARES_ELOADIPHLPAPI
|
||||||
|
ARES_EADDRGETNETWORKPARAMS = cares.ARES_EADDRGETNETWORKPARAMS
|
||||||
|
ARES_ECANCELLED = cares.ARES_ECANCELLED
|
||||||
|
|
||||||
|
ARES_FLAG_USEVC = cares.ARES_FLAG_USEVC
|
||||||
|
ARES_FLAG_PRIMARY = cares.ARES_FLAG_PRIMARY
|
||||||
|
ARES_FLAG_IGNTC = cares.ARES_FLAG_IGNTC
|
||||||
|
ARES_FLAG_NORECURSE = cares.ARES_FLAG_NORECURSE
|
||||||
|
ARES_FLAG_STAYOPEN = cares.ARES_FLAG_STAYOPEN
|
||||||
|
ARES_FLAG_NOSEARCH = cares.ARES_FLAG_NOSEARCH
|
||||||
|
ARES_FLAG_NOALIASES = cares.ARES_FLAG_NOALIASES
|
||||||
|
ARES_FLAG_NOCHECKRESP = cares.ARES_FLAG_NOCHECKRESP
|
||||||
|
|
||||||
|
|
||||||
|
_ares_errors = dict([
|
||||||
|
(cares.ARES_SUCCESS, 'ARES_SUCCESS'),
|
||||||
|
(cares.ARES_ENODATA, 'ARES_ENODATA'),
|
||||||
|
(cares.ARES_EFORMERR, 'ARES_EFORMERR'),
|
||||||
|
(cares.ARES_ESERVFAIL, 'ARES_ESERVFAIL'),
|
||||||
|
(cares.ARES_ENOTFOUND, 'ARES_ENOTFOUND'),
|
||||||
|
(cares.ARES_ENOTIMP, 'ARES_ENOTIMP'),
|
||||||
|
(cares.ARES_EREFUSED, 'ARES_EREFUSED'),
|
||||||
|
(cares.ARES_EBADQUERY, 'ARES_EBADQUERY'),
|
||||||
|
(cares.ARES_EBADNAME, 'ARES_EBADNAME'),
|
||||||
|
(cares.ARES_EBADFAMILY, 'ARES_EBADFAMILY'),
|
||||||
|
(cares.ARES_EBADRESP, 'ARES_EBADRESP'),
|
||||||
|
(cares.ARES_ECONNREFUSED, 'ARES_ECONNREFUSED'),
|
||||||
|
(cares.ARES_ETIMEOUT, 'ARES_ETIMEOUT'),
|
||||||
|
(cares.ARES_EOF, 'ARES_EOF'),
|
||||||
|
(cares.ARES_EFILE, 'ARES_EFILE'),
|
||||||
|
(cares.ARES_ENOMEM, 'ARES_ENOMEM'),
|
||||||
|
(cares.ARES_EDESTRUCTION, 'ARES_EDESTRUCTION'),
|
||||||
|
(cares.ARES_EBADSTR, 'ARES_EBADSTR'),
|
||||||
|
(cares.ARES_EBADFLAGS, 'ARES_EBADFLAGS'),
|
||||||
|
(cares.ARES_ENONAME, 'ARES_ENONAME'),
|
||||||
|
(cares.ARES_EBADHINTS, 'ARES_EBADHINTS'),
|
||||||
|
(cares.ARES_ENOTINITIALIZED, 'ARES_ENOTINITIALIZED'),
|
||||||
|
(cares.ARES_ELOADIPHLPAPI, 'ARES_ELOADIPHLPAPI'),
|
||||||
|
(cares.ARES_EADDRGETNETWORKPARAMS, 'ARES_EADDRGETNETWORKPARAMS'),
|
||||||
|
(cares.ARES_ECANCELLED, 'ARES_ECANCELLED')])
|
||||||
|
|
||||||
|
|
||||||
|
# maps c-ares flag to _socket module flag
|
||||||
|
_cares_flag_map = None
|
||||||
|
|
||||||
|
|
||||||
|
cdef _prepare_cares_flag_map():
    # Lazily build the (socket NI_* flag -> c-ares ARES_NI_* flag)
    # translation table; importing _socket is deliberately deferred.
    # The getattr defaults mirror the conventional NI_* bit values in
    # case the platform's _socket lacks one of the constants.
    global _cares_flag_map
    import _socket
    _cares_flag_map = [
        (getattr(_socket, 'NI_NUMERICHOST', 1), cares.ARES_NI_NUMERICHOST),
        (getattr(_socket, 'NI_NUMERICSERV', 2), cares.ARES_NI_NUMERICSERV),
        (getattr(_socket, 'NI_NOFQDN', 4), cares.ARES_NI_NOFQDN),
        (getattr(_socket, 'NI_NAMEREQD', 8), cares.ARES_NI_NAMEREQD),
        (getattr(_socket, 'NI_DGRAM', 16), cares.ARES_NI_DGRAM)]
|
||||||
|
|
||||||
|
|
||||||
|
cpdef _convert_cares_flags(int flags, int default=cares.ARES_NI_LOOKUPHOST|cares.ARES_NI_LOOKUPSERVICE):
    # Translate socket-module NI_* flags into their c-ares equivalents,
    # OR'ing them into *default*. Any bit left untranslated means the
    # caller passed an unsupported flag, so fail with gaierror.
    if _cares_flag_map is None:
        _prepare_cares_flag_map()
    for socket_flag, cares_flag in _cares_flag_map:
        if socket_flag & flags:
            default |= cares_flag
            flags &= ~socket_flag
    if not flags:
        return default
    raise gaierror(-1, "Bad value for ai_flags: 0x%x" % flags)
|
||||||
|
|
||||||
|
|
||||||
|
cpdef strerror(code):
    # Human-readable "ARES_EXXX: description" for a c-ares status code;
    # falls back to the numeric code when the symbolic name is unknown.
    return '%s: %s' % (_ares_errors.get(code) or code, cares.ares_strerror(code))
|
||||||
|
|
||||||
|
|
||||||
|
class InvalidIP(ValueError):
    # NOTE(review): raised by address-parsing code elsewhere in this
    # module (not visible in this chunk) for strings rejected as IPs.
    pass
|
||||||
|
|
||||||
|
|
||||||
|
cdef void gevent_sock_state_callback(void *data, int s, int read, int write):
    # C trampoline registered with c-ares as the socket-state callback:
    # recover the channel object from the opaque *data* pointer and
    # forward the (fd, readable, writable) event to it.
    if not data:
        return
    cdef channel ch = <channel>data
    ch._sock_state_callback(s, read, write)
|
||||||
|
|
||||||
|
|
||||||
|
cdef class result:
    """Outcome of an async resolution: either a value or an exception."""
    cdef public object value
    cdef public object exception

    def __init__(self, object value=None, object exception=None):
        self.value = value
        self.exception = exception

    def __repr__(self):
        if self.exception is None:
            return '%s(%r)' % (self.__class__.__name__, self.value)
        elif self.value is None:
            return '%s(exception=%r)' % (self.__class__.__name__, self.exception)
        else:
            return '%s(value=%r, exception=%r)' % (self.__class__.__name__, self.value, self.exception)
        # add repr_recursive precaution

    def successful(self):
        # True when the operation produced a value rather than an error.
        return self.exception is None

    def get(self):
        # Return the stored value, or re-raise the stored exception.
        if self.exception is not None:
            raise self.exception
        return self.value
|
||||||
|
|
||||||
|
|
||||||
|
class ares_host_result(tuple):
    """A (name, aliases, addr_list) tuple that also carries the address family.

    Built by gevent_ares_host_callback from a c-ares ``hostent``.
    """

    def __new__(cls, family, iterable):
        cdef object self = tuple.__new__(cls, iterable)
        self.family = family
        return self

    def __getnewargs__(self):
        # Needed so pickling/copying reconstructs the extra *family* arg.
        return (self.family, tuple(self))
|
||||||
|
|
||||||
|
|
||||||
|
cdef void gevent_ares_host_callback(void *arg, int status, int timeouts, hostent* host):
    # c-ares completion callback for host lookups. *arg* is a
    # (channel, callback) tuple whose reference was taken when the query
    # was issued; drop it now since the callback fires exactly once.
    cdef channel channel
    cdef object callback
    channel, callback = <tuple>arg
    Py_DECREF(<PyObjectPtr>arg)
    cdef object host_result
    try:
        if status or not host:
            # Lookup failed: deliver a gaierror wrapped in a result.
            callback(result(None, gaierror(status, strerror(status))))
        else:
            try:
                host_result = ares_host_result(host.h_addrtype, (parse_h_name(host), parse_h_aliases(host), parse_h_addr_list(host)))
            except:
                callback(result(None, sys.exc_info()[1]))
            else:
                callback(result(host_result))
    except:
        # The callback itself blew up; hand it to the loop's error handler.
        channel.loop.handle_error(callback, *sys.exc_info())
|
||||||
|
|
||||||
|
|
||||||
|
cdef void gevent_ares_nameinfo_callback(void *arg, int status, int timeouts, char *c_node, char *c_service):
    # c-ares completion callback for getnameinfo-style queries; mirrors
    # gevent_ares_host_callback but delivers a (node, service) pair.
    cdef channel channel
    cdef object callback
    channel, callback = <tuple>arg
    Py_DECREF(<PyObjectPtr>arg)
    cdef object node
    cdef object service
    try:
        if status:
            callback(result(None, gaierror(status, strerror(status))))
        else:
            # NULL C strings become None on the Python side.
            if c_node:
                node = PyUnicode_FromString(c_node)
            else:
                node = None
            if c_service:
                service = PyUnicode_FromString(c_service)
            else:
                service = None
            callback(result((node, service)))
    except:
        channel.loop.handle_error(callback, *sys.exc_info())
|
||||||
|
|
||||||
|
|
||||||
|
cdef public class channel [object PyGeventAresChannelObject, type PyGeventAresChannel_Type]:
    # A c-ares resolver channel wired into the gevent event loop: c-ares
    # tells us (via the sock_state callback) which sockets to watch, and
    # we drive ares_process_fd() from io watchers plus a repeating timer.

    cdef public object loop
    cdef ares_channeldata* channel
    cdef public dict _watchers
    cdef public object _timer

    def __init__(self, object loop, flags=None, timeout=None, tries=None, ndots=None,
                 udp_port=None, tcp_port=None, servers=None):
        cdef ares_channeldata* channel = NULL
        cdef cares.ares_options options
        memset(&options, 0, sizeof(cares.ares_options))
        cdef int optmask = cares.ARES_OPT_SOCK_STATE_CB
        options.sock_state_cb = <void*>gevent_sock_state_callback
        options.sock_state_cb_data = <void*>self
        if flags is not None:
            options.flags = int(flags)
            optmask |= cares.ARES_OPT_FLAGS
        if timeout is not None:
            # c-ares wants milliseconds; the caller passes seconds.
            options.timeout = int(float(timeout) * 1000)
            optmask |= cares.ARES_OPT_TIMEOUTMS
        if tries is not None:
            options.tries = int(tries)
            optmask |= cares.ARES_OPT_TRIES
        if ndots is not None:
            options.ndots = int(ndots)
            optmask |= cares.ARES_OPT_NDOTS
        if udp_port is not None:
            options.udp_port = int(udp_port)
            optmask |= cares.ARES_OPT_UDP_PORT
        if tcp_port is not None:
            options.tcp_port = int(tcp_port)
            optmask |= cares.ARES_OPT_TCP_PORT
        cdef int result = cares.ares_library_init(cares.ARES_LIB_INIT_ALL)  # ARES_LIB_INIT_WIN32 -DUSE_WINSOCK?
        if result:
            raise gaierror(result, strerror(result))
        result = cares.ares_init_options(&channel, &options, optmask)
        if result:
            raise gaierror(result, strerror(result))
        self._timer = loop.timer(TIMEOUT, TIMEOUT)
        self._watchers = {}
        self.channel = channel
        try:
            if servers is not None:
                self.set_servers(servers)
            self.loop = loop
        except:
            # Clean up the C channel if configuration failed.
            self.destroy()
            raise

    def __repr__(self):
        args = (self.__class__.__name__, id(self), self._timer, len(self._watchers))
        return '<%s at 0x%x _timer=%r _watchers[%s]>' % args

    def destroy(self):
        # Tear down the C channel and stop all loop integration; safe to
        # call more than once.
        if self.channel:
            # XXX ares_library_cleanup?
            cares.ares_destroy(self.channel)
            self.channel = NULL
            self._watchers.clear()
            self._timer.stop()
            self.loop = None

    def __dealloc__(self):
        if self.channel:
            # XXX ares_library_cleanup?
            cares.ares_destroy(self.channel)
            self.channel = NULL

    def set_servers(self, servers=None):
        # Replace the channel's name servers. *servers* may be None/empty
        # (reset to defaults), a comma-separated string, or a sequence of
        # IPv4/IPv6 address strings.
        if not self.channel:
            raise gaierror(cares.ARES_EDESTRUCTION, 'this ares channel has been destroyed')
        if not servers:
            servers = []
        if isinstance(servers, string_types):
            servers = servers.split(',')
        cdef int length = len(servers)
        cdef int result, index
        cdef char* string
        cdef cares.ares_addr_node* c_servers
        if length <= 0:
            result = cares.ares_set_servers(self.channel, NULL)
        else:
            # Build the C linked list c-ares expects from one contiguous
            # allocation; freed unconditionally below.
            c_servers = <cares.ares_addr_node*>malloc(sizeof(cares.ares_addr_node) * length)
            if not c_servers:
                raise MemoryError
            try:
                index = 0
                for server in servers:
                    if isinstance(server, unicode):
                        server = server.encode('ascii')
                    string = <char*?>server
                    if cares.ares_inet_pton(AF_INET, string, &c_servers[index].addr) > 0:
                        c_servers[index].family = AF_INET
                    elif cares.ares_inet_pton(AF_INET6, string, &c_servers[index].addr) > 0:
                        c_servers[index].family = AF_INET6
                    else:
                        raise InvalidIP(repr(string))
                    c_servers[index].next = &c_servers[index] + 1
                    index += 1
                    if index >= length:
                        break
                c_servers[length - 1].next = NULL
                index = cares.ares_set_servers(self.channel, c_servers)
                if index:
                    raise ValueError(strerror(index))
            finally:
                free(c_servers)

    # this crashes c-ares
    #def cancel(self):
    #    cares.ares_cancel(self.channel)

    cdef _sock_state_callback(self, int socket, int read, int write):
        # Invoked by c-ares whenever interest in a socket changes; keep
        # the loop's io watchers (and the processing timer) in sync.
        if not self.channel:
            return
        cdef object watcher = self._watchers.get(socket)
        cdef int events = 0
        if read:
            events |= EV_READ
        if write:
            events |= EV_WRITE
        if watcher is None:
            if not events:
                return
            watcher = self.loop.io(socket, events)
            self._watchers[socket] = watcher
        elif events:
            if watcher.events == events:
                return
            watcher.stop()
            watcher.events = events
        else:
            # c-ares no longer cares about this socket.
            watcher.stop()
            self._watchers.pop(socket, None)
            if not self._watchers:
                self._timer.stop()
            return
        watcher.start(self._process_fd, watcher, pass_events=True)
        self._timer.again(self._on_timer)

    def _on_timer(self):
        # Periodic tick so c-ares can process its internal timeouts.
        cares.ares_process_fd(self.channel, cares.ARES_SOCKET_BAD, cares.ARES_SOCKET_BAD)

    def _process_fd(self, int events, object watcher):
        if not self.channel:
            return
        cdef int read_fd = watcher.fd
        cdef int write_fd = read_fd
        if not (events & EV_READ):
            read_fd = cares.ARES_SOCKET_BAD
        if not (events & EV_WRITE):
            write_fd = cares.ARES_SOCKET_BAD
        cares.ares_process_fd(self.channel, read_fd, write_fd)

    def gethostbyname(self, object callback, char* name, int family=AF_INET):
        if not self.channel:
            raise gaierror(cares.ARES_EDESTRUCTION, 'this ares channel has been destroyed')
        # note that for file lookups still AF_INET can be returned for AF_INET6 request
        cdef object arg = (self, callback)
        # Keep the tuple alive while c-ares holds the raw pointer; the
        # callback DECREFs it.
        Py_INCREF(<PyObjectPtr>arg)
        cares.ares_gethostbyname(self.channel, name, family, <void*>gevent_ares_host_callback, <void*>arg)

    def gethostbyaddr(self, object callback, char* addr):
        if not self.channel:
            raise gaierror(cares.ARES_EDESTRUCTION, 'this ares channel has been destroyed')
        # will guess the family
        cdef char addr_packed[16]
        cdef int family
        cdef int length
        if cares.ares_inet_pton(AF_INET, addr, addr_packed) > 0:
            family = AF_INET
            length = 4
        elif cares.ares_inet_pton(AF_INET6, addr, addr_packed) > 0:
            family = AF_INET6
            length = 16
        else:
            raise InvalidIP(repr(addr))
        cdef object arg = (self, callback)
        Py_INCREF(<PyObjectPtr>arg)
        cares.ares_gethostbyaddr(self.channel, addr_packed, length, family, <void*>gevent_ares_host_callback, <void*>arg)

    cpdef _getnameinfo(self, object callback, tuple sockaddr, int flags):
        if not self.channel:
            raise gaierror(cares.ARES_EDESTRUCTION, 'this ares channel has been destroyed')
        cdef char* hostp = NULL
        cdef int port = 0
        cdef int flowinfo = 0
        cdef int scope_id = 0
        cdef sockaddr_in6 sa6
        if not PyTuple_Check(sockaddr):
            raise TypeError('expected a tuple, got %r' % (sockaddr, ))
        PyArg_ParseTuple(sockaddr, "si|ii", &hostp, &port, &flowinfo, &scope_id)
        if port < 0 or port > 65535:
            raise gaierror(-8, 'Invalid value for port: %r' % port)
        cdef int length = gevent_make_sockaddr(hostp, port, flowinfo, scope_id, &sa6)
        if length <= 0:
            raise InvalidIP(repr(hostp))
        cdef object arg = (self, callback)
        Py_INCREF(<PyObjectPtr>arg)
        cdef sockaddr_t* x = <sockaddr_t*>&sa6
        cares.ares_getnameinfo(self.channel, x, length, flags, <void*>gevent_ares_nameinfo_callback, <void*>arg)

    def getnameinfo(self, object callback, tuple sockaddr, int flags):
        try:
            flags = _convert_cares_flags(flags)
        except gaierror:
            # The stdlib just ignores bad flags
            flags = 0
        return self._getnameinfo(callback, sockaddr, flags)
|
||||||
206
python/gevent/backdoor.py
Normal file
206
python/gevent/backdoor.py
Normal file
@@ -0,0 +1,206 @@
|
|||||||
|
# Copyright (c) 2009-2014, gevent contributors
|
||||||
|
# Based on eventlet.backdoor Copyright (c) 2005-2006, Bob Ippolito
|
||||||
|
"""
|
||||||
|
Interactive greenlet-based network console that can be used in any process.
|
||||||
|
|
||||||
|
The :class:`BackdoorServer` provides a REPL inside a running process. As
|
||||||
|
long as the process is monkey-patched, the ``BackdoorServer`` can coexist
|
||||||
|
with other elements of the process.
|
||||||
|
|
||||||
|
.. seealso:: :class:`code.InteractiveConsole`
|
||||||
|
"""
|
||||||
|
from __future__ import print_function, absolute_import
|
||||||
|
import sys
|
||||||
|
from code import InteractiveConsole
|
||||||
|
|
||||||
|
from gevent.greenlet import Greenlet
|
||||||
|
from gevent.hub import getcurrent
|
||||||
|
from gevent.server import StreamServer
|
||||||
|
from gevent.pool import Pool
|
||||||
|
|
||||||
|
__all__ = ['BackdoorServer']
|
||||||
|
|
||||||
|
try:
|
||||||
|
sys.ps1
|
||||||
|
except AttributeError:
|
||||||
|
sys.ps1 = '>>> '
|
||||||
|
try:
|
||||||
|
sys.ps2
|
||||||
|
except AttributeError:
|
||||||
|
sys.ps2 = '... '
|
||||||
|
|
||||||
|
class _Greenlet_stdreplace(Greenlet):
    # A greenlet that replaces sys.std[in/out/err] while running.
    _fileobj = None  # the console's file-like object; installed as std streams
    saved = None     # (stdin, stderr, stdout) triple captured on switch-in

    def switch(self, *args, **kw):
        # Install our file object as the std streams before resuming.
        if self._fileobj is not None:
            self.switch_in()
        Greenlet.switch(self, *args, **kw)

    def switch_in(self):
        self.saved = sys.stdin, sys.stderr, sys.stdout
        sys.stdin = sys.stdout = sys.stderr = self._fileobj

    def switch_out(self):
        # Restore whatever streams were in place before switch_in().
        sys.stdin, sys.stderr, sys.stdout = self.saved
        self.saved = None

    def throw(self, *args, **kwargs):
        # pylint:disable=arguments-differ
        if self.saved is None and self._fileobj is not None:
            self.switch_in()
        Greenlet.throw(self, *args, **kwargs)

    def run(self):
        try:
            return Greenlet.run(self)
        finally:
            # Make sure to restore the originals.
            self.switch_out()
|
||||||
|
|
||||||
|
|
||||||
|
class BackdoorServer(StreamServer):
    """
    Provide a backdoor to a program for debugging purposes.

    .. warning:: This backdoor provides no authentication and makes no
                 attempt to limit what remote users can do. Anyone that
                 can access the server can take any action that the running
                 python process can. Thus, while you may bind to any interface, for
                 security purposes it is recommended that you bind to one
                 only accessible to the local machine, e.g.,
                 127.0.0.1/localhost.

    Basic usage::

        from gevent.backdoor import BackdoorServer
        server = BackdoorServer(('127.0.0.1', 5001),
                                banner="Hello from gevent backdoor!",
                                locals={'foo': "From defined scope!"})
        server.serve_forever()

    In a another terminal, connect with...::

        $ telnet 127.0.0.1 5001
        Trying 127.0.0.1...
        Connected to 127.0.0.1.
        Escape character is '^]'.
        Hello from gevent backdoor!
        >> print(foo)
        From defined scope!

    .. versionchanged:: 1.2a1
       Spawned greenlets are now tracked in a pool and killed when the server
       is stopped.
    """

    def __init__(self, listener, locals=None, banner=None, **server_args):
        """
        :keyword locals: If given, a dictionary of "builtin" values that will be available
            at the top-level.
        :keyword banner: If given, a string that will be printed to each connecting user.
        """
        group = Pool(greenlet_class=_Greenlet_stdreplace)  # no limit on number
        StreamServer.__init__(self, listener, spawn=group, **server_args)
        _locals = {'__doc__': None, '__name__': '__console__'}
        if locals:
            _locals.update(locals)
        self.locals = _locals

        self.banner = banner
        self.stderr = sys.stderr

    def _create_interactive_locals(self):
        # Create and return a *new* locals dictionary based on self.locals,
        # and set any new entries in it. (InteractiveConsole does not
        # copy its locals value)
        _locals = self.locals.copy()
        # __builtins__ may either be the __builtin__ module or
        # __builtin__.__dict__; in the latter case typing
        # locals() at the backdoor prompt spews out lots of
        # useless stuff
        try:
            import __builtin__
            _locals["__builtins__"] = __builtin__
        except ImportError:
            import builtins  # pylint:disable=import-error
            _locals["builtins"] = builtins
            _locals['__builtins__'] = builtins
        return _locals

    def handle(self, conn, _address):  # pylint: disable=method-hidden
        """
        Interact with one remote user.

        .. versionchanged:: 1.1b2 Each connection gets its own
            ``locals`` dictionary. Previously they were shared in a
            potentially unsafe manner.
        """
        fobj = conn.makefile(mode="rw")
        fobj = _fileobject(conn, fobj, self.stderr)
        getcurrent()._fileobj = fobj

        getcurrent().switch_in()
        try:
            console = InteractiveConsole(self._create_interactive_locals())
            if sys.version_info[:3] >= (3, 6, 0):
                # Beginning in 3.6, the console likes to print "now exiting <class>"
                # but probably our socket is already closed, so this just causes problems.
                console.interact(banner=self.banner, exitmsg='')  # pylint:disable=unexpected-keyword-arg
            else:
                console.interact(banner=self.banner)
        except SystemExit:  # raised by quit()
            if hasattr(sys, 'exc_clear'):  # py2
                sys.exc_clear()
        finally:
            conn.close()
            fobj.close()
|
||||||
|
|
||||||
|
|
||||||
|
class _fileobject(object):
    """
    A file-like object that wraps the result of socket.makefile (composition
    instead of inheritance lets us work identically under CPython and PyPy).

    We write directly to the socket, avoiding the buffering that the text-oriented
    makefile would want to do (otherwise we'd be at the mercy of waiting on a
    flush() to get called for the remote user to see data); this beats putting
    the file in binary mode and translating everywhere with a non-default
    encoding.
    """

    def __init__(self, sock, fobj, stderr):
        self._sock = sock    # raw socket; writes bypass the makefile buffer
        self._fobj = fobj    # the socket.makefile() object; used for reads
        self.stderr = stderr

    def __getattr__(self, name):
        # Everything not overridden here is delegated to the makefile object.
        return getattr(self._fobj, name)

    def write(self, data):
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        self._sock.sendall(data)

    def isatty(self):
        return True

    def flush(self):
        pass

    def readline(self, *a):
        try:
            return self._fobj.readline(*a).replace("\r\n", "\n")
        except UnicodeError:
            # Typically, under python 3, a ^C on the other end
            return ''
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Ad-hoc demo: serve a backdoor on 127.0.0.1:<PORT> with an optional banner.
    if not sys.argv[1:]:
        print('USAGE: %s PORT [banner]' % sys.argv[0])
    else:
        BackdoorServer(('127.0.0.1', int(sys.argv[1])),
                       banner=(sys.argv[2] if len(sys.argv) > 2 else None),
                       locals={'hello': 'world'}).serve_forever()
|
||||||
402
python/gevent/baseserver.py
Normal file
402
python/gevent/baseserver.py
Normal file
@@ -0,0 +1,402 @@
|
|||||||
|
"""Base class for implementing servers"""
|
||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
import sys
|
||||||
|
import _socket
|
||||||
|
import errno
|
||||||
|
from gevent.greenlet import Greenlet
|
||||||
|
from gevent.event import Event
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent._compat import string_types, integer_types, xrange
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['BaseServer']
|
||||||
|
|
||||||
|
|
||||||
|
# We define a helper function to handle closing the socket in
# do_handle; We'd like to bind it to a kwarg to avoid *any* lookups at
# all, but that's incompatible with the calling convention of
# do_handle. On CPython, this is ~20% faster than creating and calling
# a closure and ~10% faster than using a @staticmethod. (In theory, we
# could create a closure only once in set_handle, to wrap self._handle,
# but this is safer from a backwards compat standpoint.)
# we also avoid unpacking the *args tuple when calling/spawning this object
# for a tiny improvement (benchmark shows a wash)
def _handle_and_close_when_done(handle, close, args_tuple):
    # Run *handle* and guarantee *close* runs afterwards, even on error.
    try:
        return handle(*args_tuple)
    finally:
        close(*args_tuple)
|
||||||
|
|
||||||
|
|
||||||
|
class BaseServer(object):
    """
    An abstract base class that implements some common functionality for the servers in gevent.

    :param listener: Either be an address that the server should bind
        on or a :class:`gevent.socket.socket` instance that is already
        bound (and put into listening mode in case of TCP socket).

    :keyword handle: If given, the request handler. The request
        handler can be defined in a few ways. Most commonly,
        subclasses will implement a ``handle`` method as an
        instance method. Alternatively, a function can be passed
        as the ``handle`` argument to the constructor. In either
        case, the handler can later be changed by calling
        :meth:`set_handle`.

        When the request handler returns, the socket used for the
        request will be closed. Therefore, the handler must not return if
        the socket is still in use (for example, by manually spawned greenlets).

    :keyword spawn: If provided, is called to create a new
        greenlet to run the handler. By default,
        :func:`gevent.spawn` is used (meaning there is no
        artificial limit on the number of concurrent requests). Possible values for *spawn*:

        - a :class:`gevent.pool.Pool` instance -- ``handle`` will be executed
          using :meth:`gevent.pool.Pool.spawn` only if the pool is not full.
          While it is full, no new connections are accepted;
        - :func:`gevent.spawn_raw` -- ``handle`` will be executed in a raw
          greenlet which has a little less overhead then :class:`gevent.Greenlet` instances spawned by default;
        - ``None`` -- ``handle`` will be executed right away, in the :class:`Hub` greenlet.
          ``handle`` cannot use any blocking functions as it would mean switching to the :class:`Hub`.
        - an integer -- a shortcut for ``gevent.pool.Pool(integer)``

    .. versionchanged:: 1.1a1
        When the *handle* function returns from processing a connection,
        the client socket will be closed. This resolves the non-deterministic
        closing of the socket, fixing ResourceWarnings under Python 3 and PyPy.

    """
    # pylint: disable=too-many-instance-attributes,bare-except,broad-except

    #: the number of seconds to sleep in case there was an error in accept() call
    #: for consecutive errors the delay will double until it reaches max_delay
    #: when accept() finally succeeds the delay will be reset to min_delay again
    min_delay = 0.01
    max_delay = 1

    #: Sets the maximum number of consecutive accepts that a process may perform on
    #: a single wake up. High values give higher priority to high connection rates,
    #: while lower values give higher priority to already established connections.
    #: Default is 100. Note, that in case of multiple working processes on the same
    #: listening value, it should be set to a lower value. (pywsgi.WSGIServer sets it
    #: to 1 when environ["wsgi.multiprocess"] is true)
    max_accept = 100

    _spawn = Greenlet.spawn

    #: the default timeout that we wait for the client connections to close in stop()
    stop_timeout = 1

    fatal_errors = (errno.EBADF, errno.EINVAL, errno.ENOTSOCK)

    def __init__(self, listener, handle=None, spawn='default'):
        self._stop_event = Event()
        self._stop_event.set()
        self._watcher = None
        self._timer = None
        self._handle = None
        # XXX: FIXME: Subclasses rely on the presence or absence of the
        # `socket` attribute to determine whether we are open/should be opened.
        # Instead, have it be None.
        self.pool = None
        try:
            self.set_listener(listener)
            self.set_spawn(spawn)
            self.set_handle(handle)
            self.delay = self.min_delay
            self.loop = get_hub().loop
            if self.max_accept < 1:
                raise ValueError('max_accept must be positive int: %r' % (self.max_accept, ))
        except:
            self.close()
            raise

    def set_listener(self, listener):
        # Accept either an already-bound socket or an address to bind later.
        if hasattr(listener, 'accept'):
            if hasattr(listener, 'do_handshake'):
                raise TypeError('Expected a regular socket, not SSLSocket: %r' % (listener, ))
            self.family = listener.family
            self.address = listener.getsockname()
            self.socket = listener
        else:
            self.family, self.address = parse_address(listener)

    def set_spawn(self, spawn):
        # See the class docstring for the accepted *spawn* values.
        if spawn == 'default':
            self.pool = None
            self._spawn = self._spawn
        elif hasattr(spawn, 'spawn'):
            self.pool = spawn
            self._spawn = spawn.spawn
        elif isinstance(spawn, integer_types):
            from gevent.pool import Pool
            self.pool = Pool(spawn)
            self._spawn = self.pool.spawn
        else:
            self.pool = None
            self._spawn = spawn
        if hasattr(self.pool, 'full'):
            self.full = self.pool.full
        if self.pool is not None:
            # Resume accepting as soon as the pool has free capacity again.
            self.pool._semaphore.rawlink(self._start_accepting_if_started)

    def set_handle(self, handle):
        if handle is not None:
            self.handle = handle
        if hasattr(self, 'handle'):
            self._handle = self.handle
        else:
            raise TypeError("'handle' must be provided")

    def _start_accepting_if_started(self, _event=None):
        if self.started:
            self.start_accepting()

    def start_accepting(self):
        if self._watcher is None:
            # just stop watcher without creating a new one?
            self._watcher = self.loop.io(self.socket.fileno(), 1)
            self._watcher.start(self._do_read)

    def stop_accepting(self):
        if self._watcher is not None:
            self._watcher.stop()
            self._watcher = None
        if self._timer is not None:
            self._timer.stop()
            self._timer = None

    def do_handle(self, *args):
        spawn = self._spawn
        handle = self._handle
        close = self.do_close

        try:
            if spawn is None:
                _handle_and_close_when_done(handle, close, args)
            else:
                spawn(_handle_and_close_when_done, handle, close, args)
        except:
            close(*args)
            raise

    def do_close(self, *args):
        pass

    def do_read(self):
        raise NotImplementedError()

    def _do_read(self):
        for _ in xrange(self.max_accept):
            if self.full():
                self.stop_accepting()
                return
            try:
                args = self.do_read()
                self.delay = self.min_delay
                if not args:
                    return
            except:
                self.loop.handle_error(self, *sys.exc_info())
                ex = sys.exc_info()[1]
                if self.is_fatal_error(ex):
                    self.close()
                    sys.stderr.write('ERROR: %s failed with %s\n' % (self, str(ex) or repr(ex)))
                    return
                if self.delay >= 0:
                    # Back off with exponentially-increasing delay before retrying.
                    self.stop_accepting()
                    self._timer = self.loop.timer(self.delay)
                    self._timer.start(self._start_accepting_if_started)
                    self.delay = min(self.max_delay, self.delay * 2)
                break
            else:
                try:
                    self.do_handle(*args)
                except:
                    self.loop.handle_error((args[1:], self), *sys.exc_info())
                    if self.delay >= 0:
                        self.stop_accepting()
                        self._timer = self.loop.timer(self.delay)
                        self._timer.start(self._start_accepting_if_started)
                        self.delay = min(self.max_delay, self.delay * 2)
                    break

    def full(self):
        # copied from self.pool
        # pylint: disable=method-hidden
        return False

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._formatinfo())

    def _formatinfo(self):
        if hasattr(self, 'socket'):
            try:
                fileno = self.socket.fileno()
            except Exception as ex:
                fileno = str(ex)
            result = 'fileno=%s ' % fileno
        else:
            result = ''
        try:
            if isinstance(self.address, tuple) and len(self.address) == 2:
                result += 'address=%s:%s' % self.address
            else:
                result += 'address=%s' % (self.address, )
        except Exception as ex:
            result += str(ex) or '<error>'

        handle = self.__dict__.get('handle')
        if handle is not None:
            fself = getattr(handle, '__self__', None)
            try:
                if fself is self:
                    # Checks the __self__ of the handle in case it is a bound
                    # method of self to prevent recursivly defined reprs.
                    handle_repr = '<bound method %s.%s of self>' % (
                        self.__class__.__name__,
                        handle.__name__,
                    )
                else:
                    handle_repr = repr(handle)

                result += ' handle=' + handle_repr
            except Exception as ex:
                result += str(ex) or '<error>'

        return result

    @property
    def server_host(self):
        """IP address that the server is bound to (string)."""
        if isinstance(self.address, tuple):
            return self.address[0]

    @property
    def server_port(self):
        """Port that the server is bound to (an integer)."""
        if isinstance(self.address, tuple):
            return self.address[1]

    def init_socket(self):
        """If the user initialized the server with an address rather than socket,
        then this function will create a socket, bind it and put it into listening mode.

        It is not supposed to be called by the user, it is called by :meth:`start` before starting
        the accept loop."""
        pass

    @property
    def started(self):
        return not self._stop_event.is_set()

    def start(self):
        """Start accepting the connections.

        If an address was provided in the constructor, then also create a socket,
        bind it and put it into the listening mode.
        """
        self.init_socket()
        self._stop_event.clear()
        try:
            self.start_accepting()
        except:
            self.close()
            raise

    def close(self):
        """Close the listener socket and stop accepting."""
        self._stop_event.set()
        try:
            self.stop_accepting()
        finally:
            try:
                self.socket.close()
            except Exception:
                pass
            finally:
                self.__dict__.pop('socket', None)
                self.__dict__.pop('handle', None)
                self.__dict__.pop('_handle', None)
                self.__dict__.pop('_spawn', None)
                self.__dict__.pop('full', None)
                if self.pool is not None:
                    self.pool._semaphore.unlink(self._start_accepting_if_started)

    @property
    def closed(self):
        return not hasattr(self, 'socket')

    def stop(self, timeout=None):
        """
        Stop accepting the connections and close the listening socket.

        If the server uses a pool to spawn the requests, then
        :meth:`stop` also waits for all the handlers to exit. If there
        are still handlers executing after *timeout* has expired
        (default 1 second, :attr:`stop_timeout`), then the currently
        running handlers in the pool are killed.

        If the server does not use a pool, then this merely stops accepting connections;
        any spawned greenlets that are handling requests continue running until
        they naturally complete.
        """
        self.close()
        if timeout is None:
            timeout = self.stop_timeout
        if self.pool:
            self.pool.join(timeout=timeout)
            self.pool.kill(block=True, timeout=1)

    def serve_forever(self, stop_timeout=None):
        """Start the server if it hasn't been already started and wait until it's stopped."""
        # add test that serve_forever exists on stop()
        if not self.started:
            self.start()
        try:
            self._stop_event.wait()
        finally:
            Greenlet.spawn(self.stop, timeout=stop_timeout).join()

    def is_fatal_error(self, ex):
        return isinstance(ex, _socket.error) and ex.args[0] in self.fatal_errors
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_family(host):
    # Return (address_family, host) for a host string; a bracketed host
    # (``[::1]``) denotes IPv6 and is unbracketed in the result.
    if host.startswith('[') and host.endswith(']'):
        host = host[1:-1]
        return _socket.AF_INET6, host
    return _socket.AF_INET, host
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_address(address):
    """Normalize *address* (tuple, ``host:port`` string, or bare port) to ``(family, address)``."""
    if isinstance(address, tuple):
        host_part = address[0]
        # An empty host or one containing ':' is taken to be IPv6.
        family = _socket.AF_INET6 if (not host_part or ':' in host_part) else _socket.AF_INET
        return family, address

    is_bare_port = ((isinstance(address, string_types) and ':' not in address)
                    or isinstance(address, integer_types))
    if is_bare_port:
        # Just a port
        return _socket.AF_INET6, ('', int(address))

    if not isinstance(address, string_types):
        raise TypeError('Expected tuple or string, got %s' % type(address))

    host, port = address.rsplit(':', 1)
    family, host = _extract_family(host)
    if host == '*':
        host = ''
    return family, (host, int(port))
|
||||||
|
|
||||||
|
|
||||||
|
def parse_address(address):
    """Like :func:`_parse_address`, but include the offending address in any ``ValueError``."""
    try:
        result = _parse_address(address)
    except ValueError as ex:
        raise ValueError('Failed to parse address %r: %s' % (address, ex))
    return result
|
||||||
125
python/gevent/builtins.py
Normal file
125
python/gevent/builtins.py
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
# Copyright (c) 2015 gevent contributors. See LICENSE for details.
|
||||||
|
"""gevent friendly implementations of builtin functions."""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import imp # deprecated since 3.4; issues PendingDeprecationWarning in 3.5
|
||||||
|
import sys
|
||||||
|
import weakref
|
||||||
|
from gevent.lock import RLock
|
||||||
|
|
||||||
|
# Normally we'd have the "expected" case inside the try
|
||||||
|
# (Python 3, because Python 3 is the way forward). But
|
||||||
|
# under Python 2, the popular `future` library *also* provides
|
||||||
|
# a `builtins` module---which lacks the __import__ attribute.
|
||||||
|
# So we test for the old, deprecated version first
|
||||||
|
|
||||||
|
# Probe for the Python 2 module first (see the note above: the `future`
# backport also provides a `builtins` module on Py2, but without __import__).
try: # Py2
    import __builtin__ as builtins
    # On Py2, module names may be either str or unicode.
    _allowed_module_name_types = (basestring,) # pylint:disable=undefined-variable
    __target__ = '__builtin__'
except ImportError:
    import builtins # pylint: disable=import-error
    _allowed_module_name_types = (str,)
    __target__ = 'builtins'

# The original, unpatched builtin __import__; the wrapper below delegates to it.
_import = builtins.__import__
|
||||||
|
|
||||||
|
# We need to protect imports both across threads and across greenlets.
|
||||||
|
# And the order matters. Note that under 3.4, the global import lock
|
||||||
|
# and imp module are deprecated. It seems that in all Py3 versions, a
|
||||||
|
# module lock is used such that this fix is not necessary.
|
||||||
|
|
||||||
|
# We emulate the per-module locking system under Python 2 in order to
|
||||||
|
# avoid issues acquiring locks in multiple-level-deep imports
|
||||||
|
# that attempt to use the gevent blocking API at runtime; using one lock
|
||||||
|
# could lead to a LoopExit error as a greenlet attempts to block on it while
|
||||||
|
# it's already held by the main greenlet (issue #798).
|
||||||
|
|
||||||
|
# We base this approach on a simplification of what `importlib._bootstrap`
|
||||||
|
# does; notably, we don't check for deadlocks
|
||||||
|
|
||||||
|
# Maps a module name to a weak reference to its RLock; entries are removed
# automatically when the lock is garbage collected (see __module_lock).
_g_import_locks = {} # name -> wref of RLock

# When False, the patched __import__ bypasses all locking (see _unlock_imports).
__lock_imports = True
|
||||||
|
|
||||||
|
|
||||||
|
def __module_lock(name):
    """Return the (possibly new) RLock guarding imports of module *name*.

    The lock is cached via a weak reference and drops out of the cache once
    nothing else holds it.  Nothing in this function yields, so it is
    multi-greenlet safe (but not multi-threading safe).
    XXX: What about on PyPy, where the GC is asynchronous (not ref-counting)?
    (Does it stop-the-world first?)
    """
    ref = _g_import_locks.get(name)
    lock = ref() if ref is not None else None

    if lock is None:
        lock = RLock()

        def _drop(_):
            # We've seen a KeyError on PyPy on RPi2
            _g_import_locks.pop(name, None)

        _g_import_locks[name] = weakref.ref(lock, _drop)
    return lock
|
||||||
|
|
||||||
|
|
||||||
|
def __import__(*args, **kwargs):
    """
    __import__(name, globals=None, locals=None, fromlist=(), level=0) -> object

    Normally python protects imports against concurrency by doing some locking
    at the C level (at least, it does that in CPython). This function just
    wraps the normal __import__ functionality in a recursive lock, ensuring that
    we're protected against greenlet import concurrency as well.
    """
    if args and not issubclass(type(args[0]), _allowed_module_name_types):
        # if a builtin has been acquired as a bound instance method,
        # python knows not to pass 'self' when the method is called.
        # No such protection exists for monkey-patched builtins,
        # however, so this is necessary.
        args = args[1:]

    if not __lock_imports:
        # Locking disabled via _unlock_imports(); delegate straight to the
        # original builtin.
        return _import(*args, **kwargs)

    module_lock = __module_lock(args[0]) # Get a lock for the module name
    # Lock ordering: the global import lock (imp) is acquired first, then the
    # per-module lock; both are released in reverse order via nested finally.
    imp.acquire_lock()
    try:
        module_lock.acquire()
        try:
            result = _import(*args, **kwargs)
        finally:
            module_lock.release()
    finally:
        imp.release_lock()
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _unlock_imports():
    """
    Internal function, called when gevent needs to perform imports
    lazily, but does not know the state of the system. It may be impossible
    to take the import lock because there are no other running greenlets, for
    example. This causes a monkey-patched __import__ to avoid taking any locks
    until the corresponding call to :func:`_lock_imports`. This should only be
    done for limited amounts of time and when the set of imports is statically
    known to be "safe".
    """
    global __lock_imports
    # This could easily become a list that we push/pop from or an integer
    # we increment if we need to do this recursively, but we shouldn't get
    # that complex.
    __lock_imports = False
|
||||||
|
|
||||||
|
|
||||||
|
def _lock_imports():
    """Re-enable the per-module import locking disabled by :func:`_unlock_imports`."""
    global __lock_imports
    __lock_imports = True
|
||||||
|
|
||||||
|
# See the comments at the top of the file: Python 3.3+ serializes imports
# with its own per-module locks, so our patched __import__ is only offered
# for monkey-patching on older versions.
if sys.version_info[:2] >= (3, 3):
    __implements__ = []
else:
    __implements__ = ['__import__']
__all__ = __implements__
|
||||||
109
python/gevent/cares.pxd
Normal file
109
python/gevent/cares.pxd
Normal file
@@ -0,0 +1,109 @@
|
|||||||
|
# Cython declarations for the subset of the c-ares API used by gevent's
# ares-based DNS resolver.  These mirror ares.h; see the c-ares documentation
# for the semantics of each constant and function.
cdef extern from "ares.h":
    struct ares_options:
        int flags
        void* sock_state_cb
        void* sock_state_cb_data
        int timeout
        int tries
        int ndots
        unsigned short udp_port
        unsigned short tcp_port
        char **domains
        int ndomains
        char* lookups

    # ares_init_options() optmask bits
    int ARES_OPT_FLAGS
    int ARES_OPT_SOCK_STATE_CB
    int ARES_OPT_TIMEOUTMS
    int ARES_OPT_TRIES
    int ARES_OPT_NDOTS
    int ARES_OPT_TCP_PORT
    int ARES_OPT_UDP_PORT
    int ARES_OPT_SERVERS
    int ARES_OPT_DOMAINS
    int ARES_OPT_LOOKUPS

    # ares_options.flags bits
    int ARES_FLAG_USEVC
    int ARES_FLAG_PRIMARY
    int ARES_FLAG_IGNTC
    int ARES_FLAG_NORECURSE
    int ARES_FLAG_STAYOPEN
    int ARES_FLAG_NOSEARCH
    int ARES_FLAG_NOALIASES
    int ARES_FLAG_NOCHECKRESP

    int ARES_LIB_INIT_ALL
    int ARES_SOCKET_BAD

    # status / error codes
    int ARES_SUCCESS
    int ARES_ENODATA
    int ARES_EFORMERR
    int ARES_ESERVFAIL
    int ARES_ENOTFOUND
    int ARES_ENOTIMP
    int ARES_EREFUSED
    int ARES_EBADQUERY
    int ARES_EBADNAME
    int ARES_EBADFAMILY
    int ARES_EBADRESP
    int ARES_ECONNREFUSED
    int ARES_ETIMEOUT
    int ARES_EOF
    int ARES_EFILE
    int ARES_ENOMEM
    int ARES_EDESTRUCTION
    int ARES_EBADSTR
    int ARES_EBADFLAGS
    int ARES_ENONAME
    int ARES_EBADHINTS
    int ARES_ENOTINITIALIZED
    int ARES_ELOADIPHLPAPI
    int ARES_EADDRGETNETWORKPARAMS
    int ARES_ECANCELLED

    # ares_getnameinfo() flags
    int ARES_NI_NOFQDN
    int ARES_NI_NUMERICHOST
    int ARES_NI_NAMEREQD
    int ARES_NI_NUMERICSERV
    int ARES_NI_DGRAM
    int ARES_NI_TCP
    int ARES_NI_UDP
    int ARES_NI_SCTP
    int ARES_NI_DCCP
    int ARES_NI_NUMERICSCOPE
    int ARES_NI_LOOKUPHOST
    int ARES_NI_LOOKUPSERVICE

    int ares_library_init(int flags)
    void ares_library_cleanup()
    int ares_init_options(void *channelptr, ares_options *options, int)
    int ares_init(void *channelptr)
    void ares_destroy(void *channelptr)
    void ares_gethostbyname(void* channel, char *name, int family, void* callback, void *arg)
    void ares_gethostbyaddr(void* channel, void *addr, int addrlen, int family, void* callback, void *arg)
    void ares_process_fd(void* channel, int read_fd, int write_fd)
    char* ares_strerror(int code)
    void ares_cancel(void* channel)
    void ares_getnameinfo(void* channel, void* sa, int salen, int flags, void* callback, void *arg)

    # Opaque address structs; only passed through, never inspected here.
    struct in_addr:
        pass

    struct ares_in6_addr:
        pass

    struct addr_union:
        in_addr addr4
        ares_in6_addr addr6

    struct ares_addr_node:
        ares_addr_node *next
        int family
        addr_union addr

    int ares_set_servers(void* channel, ares_addr_node *servers)


cdef extern from "cares_pton.h":
    int ares_inet_pton(int af, char *src, void *dst)
|
||||||
7
python/gevent/cares_ntop.h
Normal file
7
python/gevent/cares_ntop.h
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
/* Select an inet_ntop implementation: use c-ares' own when c-ares is
 * embedded in this build (CARES_EMBED); otherwise alias the system's. */
#ifdef CARES_EMBED
#include "ares_setup.h"
#include "ares.h"
#else
#include <arpa/inet.h>
#define ares_inet_ntop(w,x,y,z) inet_ntop(w,x,y,z)
#endif
|
||||||
8
python/gevent/cares_pton.h
Normal file
8
python/gevent/cares_pton.h
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
/* Select inet_pton / inet_net_pton implementations: use c-ares' own when
 * c-ares is embedded in this build (CARES_EMBED); otherwise alias the
 * system's. */
#ifdef CARES_EMBED
#include "ares_setup.h"
#include "ares_inet_net_pton.h"
#else
#include <arpa/inet.h>
#define ares_inet_pton(x,y,z) inet_pton(x,y,z)
#define ares_inet_net_pton(w,x,y,z) inet_net_pton(w,x,y,z)
#endif
|
||||||
22
python/gevent/core.py
Normal file
22
python/gevent/core.py
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Copyright (c) 2009-2015 Denis Bilenko and gevent contributors. See LICENSE for details.
"""Select and re-export the libev core implementation (C extension or CFFI)."""
from __future__ import absolute_import

import os

from gevent._util import copy_globals

try:
    # GEVENT_CORE_CFFI_ONLY forces the CFFI backend even when the C
    # extension is importable.
    if os.environ.get('GEVENT_CORE_CFFI_ONLY'):
        raise ImportError("Not attempting corecext")

    from gevent.libev import corecext as _core
except ImportError:
    # GEVENT_CORE_CEXT_ONLY disables the CFFI fallback: propagate the
    # ImportError instead of silently switching backends.
    if os.environ.get('GEVENT_CORE_CEXT_ONLY'):
        raise

    # CFFI/PyPy
    from gevent.libev import corecffi as _core

# Re-export the selected backend's public names from this module.
copy_globals(_core, globals())

__all__ = _core.__all__ # pylint:disable=no-member
|
||||||
159
python/gevent/dnshelper.c
Normal file
159
python/gevent/dnshelper.c
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
/* Copyright (c) 2011 Denis Bilenko. See LICENSE for details. */
|
||||||
|
#include "Python.h"
|
||||||
|
#ifdef CARES_EMBED
|
||||||
|
#include "ares_setup.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_NETDB_H
|
||||||
|
#include <netdb.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "ares.h"
|
||||||
|
|
||||||
|
#include "cares_ntop.h"
|
||||||
|
#include "cares_pton.h"
|
||||||
|
|
||||||
|
#if PY_VERSION_HEX < 0x02060000
|
||||||
|
#define PyUnicode_FromString PyString_FromString
|
||||||
|
#elif PY_MAJOR_VERSION < 3
|
||||||
|
#define PyUnicode_FromString PyBytes_FromString
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/* Cached reference to _socket.error (filled in lazily). */
static PyObject* _socket_error = 0;

/* Lazily import `_socket` and cache the attribute *name* in *pobject*.
 * Any import/lookup failure is reported via PyErr_WriteUnraisable and
 * PyExc_IOError is cached as a fallback, so the return value is always
 * usable as an exception type. */
static PyObject*
get_socket_object(PyObject** pobject, const char* name)
{
    if (!*pobject) {
        PyObject* _socket;
        _socket = PyImport_ImportModule("_socket");
        if (_socket) {
            *pobject = PyObject_GetAttrString(_socket, name);
            if (!*pobject) {
                PyErr_WriteUnraisable(Py_None);
            }
            Py_DECREF(_socket);
        }
        else {
            PyErr_WriteUnraisable(Py_None);
        }
        if (!*pobject) {
            *pobject = PyExc_IOError;
        }
    }
    return *pobject;
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Format the binary address *src* (family AF_INET or AF_INET6) into *tmpbuf*
 * and append the resulting string to *list*.  Returns 0 on success, -1 on
 * formatting or append failure. */
static int
gevent_append_addr(PyObject* list, int family, void* src, char* tmpbuf, size_t tmpsize) {
    int status = -1;
    PyObject* tmp;
    if (ares_inet_ntop(family, src, tmpbuf, tmpsize)) {
        tmp = PyUnicode_FromString(tmpbuf);
        if (tmp) {
            status = PyList_Append(list, tmp);
            Py_DECREF(tmp);
        }
    }
    return status;
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Return h->h_name as a new Python string (NULL on failure). */
static PyObject*
parse_h_name(struct hostent *h)
{
    return PyUnicode_FromString(h->h_name);
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Build a Python list of the alias strings in h->h_aliases, skipping any
 * alias equal to the canonical h_name.  On an intermediate failure the
 * loop stops early and the (possibly partial) list is still returned;
 * returns NULL only if the list itself could not be created. */
static PyObject*
parse_h_aliases(struct hostent *h)
{
    char **pch;
    PyObject *result = NULL;
    PyObject *tmp;

    result = PyList_New(0);

    if (result && h->h_aliases) {
        for (pch = h->h_aliases; *pch != NULL; pch++) {
            /* Skip the canonical name (compare both by pointer and value). */
            if (*pch != h->h_name && strcmp(*pch, h->h_name)) {
                int status;
                tmp = PyUnicode_FromString(*pch);
                if (tmp == NULL) {
                    break;
                }

                status = PyList_Append(result, tmp);
                Py_DECREF(tmp);

                if (status) {
                    break;
                }
            }
        }
    }

    return result;
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Build a Python list of string representations of the addresses in
 * h->h_addr_list.  Supports AF_INET and AF_INET6; any other address
 * family sets _socket.error and returns NULL.  On a per-address failure
 * the loop stops early and the partial list is returned. */
static PyObject *
parse_h_addr_list(struct hostent *h)
{
    char **pch;
    PyObject *result = NULL;

    result = PyList_New(0);

    if (result) {
        switch (h->h_addrtype) {
            case AF_INET:
            {
                /* Large enough for the longest dotted-quad plus NUL. */
                char tmpbuf[sizeof "255.255.255.255"];
                for (pch = h->h_addr_list; *pch != NULL; pch++) {
                    if (gevent_append_addr(result, AF_INET, *pch, tmpbuf, sizeof(tmpbuf))) {
                        break;
                    }
                }
                break;
            }
            case AF_INET6:
            {
                /* Large enough for the longest mixed-notation IPv6 literal. */
                char tmpbuf[sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")];
                for (pch = h->h_addr_list; *pch != NULL; pch++) {
                    if (gevent_append_addr(result, AF_INET6, *pch, tmpbuf, sizeof(tmpbuf))) {
                        break;
                    }
                }
                break;
            }
            default:
                PyErr_SetString(get_socket_object(&_socket_error, "error"), "unsupported address family");
                Py_DECREF(result);
                result = NULL;
        }
    }

    return result;
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Fill *sa6 from the textual address *hostp*, trying IPv4 first and then
 * IPv6.  For IPv4 the buffer is reinterpreted as a sockaddr_in.  Returns
 * the size of the populated sockaddr structure, or -1 when *hostp* parses
 * as neither family. */
static int
gevent_make_sockaddr(char* hostp, int port, int flowinfo, int scope_id, struct sockaddr_in6* sa6) {
    if ( ares_inet_pton(AF_INET, hostp, &((struct sockaddr_in*)sa6)->sin_addr.s_addr) > 0 ) {
        ((struct sockaddr_in*)sa6)->sin_family = AF_INET;
        ((struct sockaddr_in*)sa6)->sin_port = htons(port);
        return sizeof(struct sockaddr_in);
    }
    else if ( ares_inet_pton(AF_INET6, hostp, &sa6->sin6_addr.s6_addr) > 0 ) {
        sa6->sin6_family = AF_INET6;
        sa6->sin6_port = htons(port);
        /* flowinfo and scope_id only exist for IPv6. */
        sa6->sin6_flowinfo = flowinfo;
        sa6->sin6_scope_id = scope_id;
        return sizeof(struct sockaddr_in6);
    }
    return -1;
}
|
||||||
448
python/gevent/event.py
Normal file
448
python/gevent/event.py
Normal file
@@ -0,0 +1,448 @@
|
|||||||
|
# Copyright (c) 2009-2016 Denis Bilenko, gevent contributors. See LICENSE for details.
|
||||||
|
"""Basic synchronization primitives: Event and AsyncResult"""
|
||||||
|
from __future__ import print_function
|
||||||
|
import sys
|
||||||
|
from gevent.hub import get_hub, getcurrent, _NONE
|
||||||
|
from gevent._compat import reraise
|
||||||
|
from gevent.hub import InvalidSwitchError
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
from gevent._tblib import dump_traceback, load_traceback
|
||||||
|
|
||||||
|
__all__ = ['Event', 'AsyncResult']
|
||||||
|
|
||||||
|
|
||||||
|
class _AbstractLinkable(object):
    """Shared machinery for objects that notify linked callbacks when ready.

    Encapsulates the standard parts of the linking and notifying protocol
    common to both repeatable events (:class:`Event`) and one-time events
    (:class:`AsyncResult`).
    """

    # Pending hub callback scheduled by _check_and_notify; reset in
    # _notify_links.  Class-level default so instances start "idle".
    _notifier = None

    def __init__(self):
        # Also previously, AsyncResult maintained the order of notifications, but Event
        # did not; this implementation does not. (Event also only call callbacks one
        # time (set), but AsyncResult permitted duplicates.)

        # HOWEVER, gevent.queue.Queue does guarantee the order of getters relative
        # to putters. Some existing documentation out on the net likes to refer to
        # gevent as "deterministic", such that running the same program twice will
        # produce results in the same order (so long as I/O isn't involved). This could
        # be an argument to maintain order. (One easy way to do that while guaranteeing
        # uniqueness would be with a 2.7+ OrderedDict.)
        self._links = set()
        self.hub = get_hub()

    def ready(self):
        # Instances must define this
        raise NotImplementedError()

    def _check_and_notify(self):
        # If this object is ready to be notified, begin the process.
        if self.ready():
            if self._links and not self._notifier:
                self._notifier = self.hub.loop.run_callback(self._notify_links)

    def rawlink(self, callback):
        """
        Register a callback to call when this object is ready.

        *callback* will be called in the :class:`Hub <gevent.hub.Hub>`, so it must not use blocking gevent API.
        *callback* will be passed one argument: this instance.
        """
        if not callable(callback):
            raise TypeError('Expected callable: %r' % (callback, ))
        self._links.add(callback)
        self._check_and_notify()

    def unlink(self, callback):
        """Remove the callback set by :meth:`rawlink`"""
        try:
            self._links.remove(callback)
        except KeyError:
            pass

    def _notify_links(self):
        # Actually call the notification callbacks. Those callbacks in todo that are
        # still in _links are called. This method is careful to avoid iterating
        # over self._links, because links could be added or removed while this
        # method runs. Only links present when this method begins running
        # will be called; if a callback adds a new link, it will not run
        # until the next time notify_links is activated

        # We don't need to capture self._links as todo when establishing
        # this callback; any links removed between now and then are handled
        # by the `if` below; any links added are also grabbed
        todo = set(self._links)
        for link in todo:
            # check that link was not notified yet and was not removed by the client
            # We have to do this here, and not as part of the 'for' statement because
            # a previous link(self) call might have altered self._links
            if link in self._links:
                try:
                    link(self)
                except: # pylint:disable=bare-except
                    self.hub.handle_error((link, self), *sys.exc_info())
                if getattr(link, 'auto_unlink', None):
                    # This attribute can avoid having to keep a reference to the function
                    # *in* the function, which is a cycle
                    self.unlink(link)

        # save a tiny bit of memory by letting _notifier be collected
        # bool(self._notifier) would turn to False as soon as we exit this
        # method anyway.
        del todo
        del self._notifier

    def _wait_core(self, timeout, catch=Timeout):
        # The core of the wait implementation, handling
        # switching and linking. If *catch* is set to (),
        # a timeout that elapses will be allowed to be raised.
        # Returns a true value if the wait succeeded without timing out.
        switch = getcurrent().switch
        self.rawlink(switch)
        try:
            timer = Timeout._start_new_or_dummy(timeout)
            try:
                try:
                    result = self.hub.switch()
                    if result is not self: # pragma: no cover
                        raise InvalidSwitchError('Invalid switch into Event.wait(): %r' % (result, ))
                    return True
                except catch as ex:
                    if ex is not timer:
                        raise
                    # test_set_and_clear and test_timeout in test_threading
                    # rely on the exact return values, not just truthish-ness
                    return False
            finally:
                timer.cancel()
        finally:
            self.unlink(switch)

    def _wait_return_value(self, waited, wait_success):
        # Subclass hook: translate (waited, wait_success) into _wait()'s
        # public return value.  The base returns None.
        # pylint:disable=unused-argument
        return None

    def _wait(self, timeout=None):
        if self.ready():
            return self._wait_return_value(False, False)

        gotit = self._wait_core(timeout)
        return self._wait_return_value(True, gotit)
|
||||||
|
|
||||||
|
|
||||||
|
class Event(_AbstractLinkable):
    """A synchronization primitive that allows one greenlet to wake up one or more others.
    It has the same interface as :class:`threading.Event` but works across greenlets.

    An event object manages an internal flag: :meth:`set` makes it true,
    :meth:`clear` resets it to false, and :meth:`wait` blocks until it is true.

    .. note::
        The order and timing in which waiting greenlets are awakened is not
        determined. As an implementation note, in gevent 1.1 and 1.0 waiting
        greenlets are awakened in an undetermined order sometime *after* the
        current greenlet yields to the event loop; other greenlets may run in
        between. These details may change in the future.
    """

    _flag = False

    def __str__(self):
        state = 'set' if self._flag else 'clear'
        return '<%s %s _links[%s]>' % (self.__class__.__name__, state, len(self._links))

    def is_set(self):
        """Return true if and only if the internal flag is true."""
        return self._flag

    # Alias kept so this is a drop-in replacement for threading.Event.
    isSet = is_set
    # Alias that makes Event compatible with AsyncResult and Greenlet
    # (for example in wait()).
    ready = is_set

    def set(self):
        """
        Set the internal flag to true.

        All greenlets waiting for it to become true are awakened in some
        order at some time in the future. Greenlets that call :meth:`wait`
        once the flag is true will not block at all (until :meth:`clear`
        is called).
        """
        self._flag = True
        self._check_and_notify()

    def clear(self):
        """
        Reset the internal flag to false.

        Subsequently, threads calling :meth:`wait` will block until
        :meth:`set` is called to set the internal flag to true again.
        """
        self._flag = False

    def _wait_return_value(self, waited, wait_success):
        # See http://bugs.python.org/issue13502: if we actually had to wait,
        # report whether the flag got set during the wait; otherwise echo
        # the current (necessarily true) state of the flag.
        if waited:
            return wait_success
        flag = self._flag
        assert flag, "if we didn't wait we should already be set"
        return flag

    def wait(self, timeout=None):
        """
        Block until the internal flag is true.

        If the internal flag is true on entry, return immediately.
        Otherwise block until another thread (greenlet) calls :meth:`set`,
        or until the optional *timeout* (a float, in seconds) elapses.

        :return: True if and only if the internal flag has been set to
            true, either before the wait call or after the wait starts, so
            it will always return ``True`` except if a *timeout* is given
            and the operation times out.

        .. versionchanged:: 1.1
            The return value represents the flag during the elapsed wait,
            not just after it elapses. This solves a race condition if one
            greenlet sets and then clears the flag without switching while
            other greenlets are waiting.
        """
        return self._wait(timeout)

    def _reset_internal_locks(self): # pragma: no cover
        # Compatibility stub for threading.Event (only relevant under
        # patch_all(Event=True)); without it threading's fork cleanup
        # raises an ignored AttributeError.
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class AsyncResult(_AbstractLinkable):
|
||||||
|
"""A one-time event that stores a value or an exception.
|
||||||
|
|
||||||
|
Like :class:`Event` it wakes up all the waiters when :meth:`set` or :meth:`set_exception`
|
||||||
|
is called. Waiters may receive the passed value or exception by calling :meth:`get`
|
||||||
|
instead of :meth:`wait`. An :class:`AsyncResult` instance cannot be reset.
|
||||||
|
|
||||||
|
To pass a value call :meth:`set`. Calls to :meth:`get` (those that are currently blocking as well as
|
||||||
|
those made in the future) will return the value:
|
||||||
|
|
||||||
|
>>> result = AsyncResult()
|
||||||
|
>>> result.set(100)
|
||||||
|
>>> result.get()
|
||||||
|
100
|
||||||
|
|
||||||
|
To pass an exception call :meth:`set_exception`. This will cause :meth:`get` to raise that exception:
|
||||||
|
|
||||||
|
>>> result = AsyncResult()
|
||||||
|
>>> result.set_exception(RuntimeError('failure'))
|
||||||
|
>>> result.get()
|
||||||
|
Traceback (most recent call last):
|
||||||
|
...
|
||||||
|
RuntimeError: failure
|
||||||
|
|
||||||
|
:class:`AsyncResult` implements :meth:`__call__` and thus can be used as :meth:`link` target:
|
||||||
|
|
||||||
|
>>> import gevent
|
||||||
|
>>> result = AsyncResult()
|
||||||
|
>>> gevent.spawn(lambda : 1/0).link(result)
|
||||||
|
>>> try:
|
||||||
|
... result.get()
|
||||||
|
... except ZeroDivisionError:
|
||||||
|
... print('ZeroDivisionError')
|
||||||
|
ZeroDivisionError
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
The order and timing in which waiting greenlets are awakened is not determined.
|
||||||
|
As an implementation note, in gevent 1.1 and 1.0, waiting greenlets are awakened in a
|
||||||
|
undetermined order sometime *after* the current greenlet yields to the event loop. Other greenlets
|
||||||
|
(those not waiting to be awakened) may run between the current greenlet yielding and
|
||||||
|
the waiting greenlets being awakened. These details may change in the future.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1
|
||||||
|
The exact order in which waiting greenlets are awakened is not the same
|
||||||
|
as in 1.0.
|
||||||
|
.. versionchanged:: 1.1
|
||||||
|
Callbacks :meth:`linked <rawlink>` to this object are required to be hashable, and duplicates are
|
||||||
|
merged.
|
||||||
|
"""
|
||||||
|
|
||||||
|
_value = _NONE
|
||||||
|
_exc_info = ()
|
||||||
|
_notifier = None
|
||||||
|
|
||||||
|
@property
def _exception(self):
    # The stored exception instance, or the _NONE sentinel when unset.
    if self._exc_info:
        return self._exc_info[1]
    return _NONE
|
||||||
|
|
||||||
|
@property
def value(self):
    """
    Holds the value passed to :meth:`set` if :meth:`set` was called. Otherwise,
    ``None``
    """
    if self._value is _NONE:
        return None
    return self._value
|
||||||
|
|
||||||
|
@property
def exc_info(self):
    """
    The three-tuple of exception information if :meth:`set_exception` was called.

    The stored traceback is kept in dumped form; it is rehydrated with
    ``load_traceback`` on every access.  Returns ``()`` when no exception
    has been set.
    """
    if self._exc_info:
        return (self._exc_info[0], self._exc_info[1], load_traceback(self._exc_info[2]))
    return ()
|
||||||
|
|
||||||
|
def __str__(self):
    """Debug representation showing value/exception state and link count."""
    parts = ['<%s ' % (self.__class__.__name__, )]
    if self.value is not None or self._exception is not _NONE:
        parts.append('value=%r ' % self.value)
    if self._exception is not None and self._exception is not _NONE:
        parts.append('exception=%r ' % self._exception)
    if self._exception is _NONE:
        parts.append('unset ')
    parts.append(' _links[%s]>' % len(self._links))
    return ''.join(parts)
|
||||||
|
|
||||||
|
def ready(self):
    """Return true if and only if it holds a value or an exception"""
    # NOTE: preserves the original truthy return (the _exc_info tuple
    # itself when an exception is stored), not a coerced bool.
    if self._exc_info:
        return self._exc_info
    return self._value is not _NONE
|
||||||
|
|
||||||
|
def successful(self):
    """Return true if and only if it is ready and holds a value"""
    # A stored exception leaves _value at the _NONE sentinel, so this is
    # false both when unset and when an exception was set.
    return self._value is not _NONE
|
||||||
|
|
||||||
|
@property
def exception(self):
    """Holds the exception instance passed to :meth:`set_exception` if :meth:`set_exception` was called.
    Otherwise ``None``."""
    # Falls through (implicitly returning None) when no exception is stored.
    if self._exc_info:
        return self._exc_info[1]
|
||||||
|
|
||||||
|
def set(self, value=None):
    """Store the value and wake up any waiters.

    All greenlets blocking on :meth:`get` or :meth:`wait` are awakened.
    Subsequent calls to :meth:`wait` and :meth:`get` will not block at all.
    """
    self._value = value
    self._check_and_notify()
|
||||||
|
|
||||||
|
def set_exception(self, exception, exc_info=None):
|
||||||
|
"""Store the exception and wake up any waiters.
|
||||||
|
|
||||||
|
All greenlets blocking on :meth:`get` or :meth:`wait` are awakened.
|
||||||
|
Subsequent calls to :meth:`wait` and :meth:`get` will not block at all.
|
||||||
|
|
||||||
|
:keyword tuple exc_info: If given, a standard three-tuple of type, value, :class:`traceback`
|
||||||
|
as returned by :func:`sys.exc_info`. This will be used when the exception
|
||||||
|
is re-raised to propagate the correct traceback.
|
||||||
|
"""
|
||||||
|
if exc_info:
|
||||||
|
self._exc_info = (exc_info[0], exc_info[1], dump_traceback(exc_info[2]))
|
||||||
|
else:
|
||||||
|
self._exc_info = (type(exception), exception, dump_traceback(None))
|
||||||
|
|
||||||
|
self._check_and_notify()
|
||||||
|
|
||||||
|
def _raise_exception(self):
|
||||||
|
reraise(*self.exc_info)
|
||||||
|
|
||||||
|
def get(self, block=True, timeout=None):
|
||||||
|
"""Return the stored value or raise the exception.
|
||||||
|
|
||||||
|
If this instance already holds a value or an exception, return or raise it immediatelly.
|
||||||
|
Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception` or
|
||||||
|
until the optional timeout occurs.
|
||||||
|
|
||||||
|
When the *timeout* argument is present and not ``None``, it should be a
|
||||||
|
floating point number specifying a timeout for the operation in seconds
|
||||||
|
(or fractions thereof). If the *timeout* elapses, the *Timeout* exception will
|
||||||
|
be raised.
|
||||||
|
|
||||||
|
:keyword bool block: If set to ``False`` and this instance is not ready,
|
||||||
|
immediately raise a :class:`Timeout` exception.
|
||||||
|
"""
|
||||||
|
if self._value is not _NONE:
|
||||||
|
return self._value
|
||||||
|
if self._exc_info:
|
||||||
|
return self._raise_exception()
|
||||||
|
|
||||||
|
if not block:
|
||||||
|
# Not ready and not blocking, so immediately timeout
|
||||||
|
raise Timeout()
|
||||||
|
|
||||||
|
# Wait, raising a timeout that elapses
|
||||||
|
self._wait_core(timeout, ())
|
||||||
|
|
||||||
|
# by definition we are now ready
|
||||||
|
return self.get(block=False)
|
||||||
|
|
||||||
|
def get_nowait(self):
|
||||||
|
"""
|
||||||
|
Return the value or raise the exception without blocking.
|
||||||
|
|
||||||
|
If this object is not yet :meth:`ready <ready>`, raise
|
||||||
|
:class:`gevent.Timeout` immediately.
|
||||||
|
"""
|
||||||
|
return self.get(block=False)
|
||||||
|
|
||||||
|
def _wait_return_value(self, waited, wait_success):
|
||||||
|
# pylint:disable=unused-argument
|
||||||
|
# Always return the value. Since this is a one-shot event,
|
||||||
|
# no race condition should reset it.
|
||||||
|
return self.value
|
||||||
|
|
||||||
|
def wait(self, timeout=None):
|
||||||
|
"""Block until the instance is ready.
|
||||||
|
|
||||||
|
If this instance already holds a value, it is returned immediately. If this
|
||||||
|
instance already holds an exception, ``None`` is returned immediately.
|
||||||
|
|
||||||
|
Otherwise, block until another greenlet calls :meth:`set` or :meth:`set_exception`
|
||||||
|
(at which point either the value or ``None`` will be returned, respectively),
|
||||||
|
or until the optional timeout expires (at which point ``None`` will also be
|
||||||
|
returned).
|
||||||
|
|
||||||
|
When the *timeout* argument is present and not ``None``, it should be a
|
||||||
|
floating point number specifying a timeout for the operation in seconds
|
||||||
|
(or fractions thereof).
|
||||||
|
|
||||||
|
.. note:: If a timeout is given and expires, ``None`` will be returned
|
||||||
|
(no timeout exception will be raised).
|
||||||
|
|
||||||
|
"""
|
||||||
|
return self._wait(timeout)
|
||||||
|
|
||||||
|
# link protocol
|
||||||
|
def __call__(self, source):
|
||||||
|
if source.successful():
|
||||||
|
self.set(source.value)
|
||||||
|
else:
|
||||||
|
self.set_exception(source.exception, getattr(source, 'exc_info', None))
|
||||||
|
|
||||||
|
# Methods to make us more like concurrent.futures.Future
|
||||||
|
|
||||||
|
def result(self, timeout=None):
|
||||||
|
return self.get(timeout=timeout)
|
||||||
|
|
||||||
|
set_result = set
|
||||||
|
|
||||||
|
def done(self):
|
||||||
|
return self.ready()
|
||||||
|
|
||||||
|
# we don't support cancelling
|
||||||
|
|
||||||
|
def cancel(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
def cancelled(self):
|
||||||
|
return False
|
||||||
|
|
||||||
|
# exception is a method, we use it as a property
|
||||||
219
python/gevent/fileobject.py
Normal file
219
python/gevent/fileobject.py
Normal file
@@ -0,0 +1,219 @@
|
|||||||
|
"""
|
||||||
|
Wrappers to make file-like objects cooperative.
|
||||||
|
|
||||||
|
.. class:: FileObject
|
||||||
|
|
||||||
|
The main entry point to the file-like gevent-compatible behaviour. It will be defined
|
||||||
|
to be the best available implementation.
|
||||||
|
|
||||||
|
There are two main implementations of ``FileObject``. On all systems,
|
||||||
|
there is :class:`FileObjectThread` which uses the built-in native
|
||||||
|
threadpool to avoid blocking the entire interpreter. On UNIX systems
|
||||||
|
(those that support the :mod:`fcntl` module), there is also
|
||||||
|
:class:`FileObjectPosix` which uses native non-blocking semantics.
|
||||||
|
|
||||||
|
A third class, :class:`FileObjectBlock`, is simply a wrapper that executes everything
|
||||||
|
synchronously (and so is not gevent-compatible). It is provided for testing and debugging
|
||||||
|
purposes.
|
||||||
|
|
||||||
|
Configuration
|
||||||
|
=============
|
||||||
|
|
||||||
|
You may change the default value for ``FileObject`` using the
|
||||||
|
``GEVENT_FILE`` environment variable. Set it to ``posix``, ``thread``,
|
||||||
|
or ``block`` to choose from :class:`FileObjectPosix`,
|
||||||
|
:class:`FileObjectThread` and :class:`FileObjectBlock`, respectively.
|
||||||
|
You may also set it to the fully qualified class name of another
|
||||||
|
object that implements the file interface to use one of your own
|
||||||
|
objects.
|
||||||
|
|
||||||
|
.. note:: The environment variable must be set at the time this module
|
||||||
|
is first imported.
|
||||||
|
|
||||||
|
Classes
|
||||||
|
=======
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import functools
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
from gevent._fileobjectcommon import FileObjectClosed
|
||||||
|
from gevent._fileobjectcommon import FileObjectBase
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent._compat import integer_types
|
||||||
|
from gevent._compat import reraise
|
||||||
|
from gevent.lock import Semaphore, DummySemaphore
|
||||||
|
|
||||||
|
|
||||||
|
PYPY = hasattr(sys, 'pypy_version_info')
|
||||||
|
|
||||||
|
if hasattr(sys, 'exc_clear'):
|
||||||
|
def _exc_clear():
|
||||||
|
sys.exc_clear()
|
||||||
|
else:
|
||||||
|
def _exc_clear():
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'FileObjectPosix',
|
||||||
|
'FileObjectThread',
|
||||||
|
'FileObject',
|
||||||
|
]
|
||||||
|
|
||||||
|
try:
|
||||||
|
from fcntl import fcntl
|
||||||
|
except ImportError:
|
||||||
|
__all__.remove("FileObjectPosix")
|
||||||
|
else:
|
||||||
|
del fcntl
|
||||||
|
from gevent._fileobjectposix import FileObjectPosix
|
||||||
|
|
||||||
|
|
||||||
|
class FileObjectThread(FileObjectBase):
|
||||||
|
"""
|
||||||
|
A file-like object wrapping another file-like object, performing all blocking
|
||||||
|
operations on that object in a background thread.
|
||||||
|
|
||||||
|
.. caution::
|
||||||
|
Attempting to change the threadpool or lock of an existing FileObjectThread
|
||||||
|
has undefined consequences.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1b1
|
||||||
|
The file object is closed using the threadpool. Note that whether or
|
||||||
|
not this action is synchronous or asynchronous is not documented.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, fobj, mode=None, bufsize=-1, close=True, threadpool=None, lock=True):
|
||||||
|
"""
|
||||||
|
:param fobj: The underlying file-like object to wrap, or an integer fileno
|
||||||
|
that will be pass to :func:`os.fdopen` along with *mode* and *bufsize*.
|
||||||
|
:keyword bool lock: If True (the default) then all operations will
|
||||||
|
be performed one-by-one. Note that this does not guarantee that, if using
|
||||||
|
this file object from multiple threads/greenlets, operations will be performed
|
||||||
|
in any particular order, only that no two operations will be attempted at the
|
||||||
|
same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
|
||||||
|
file operations with an external resource.
|
||||||
|
:keyword bool close: If True (the default) then when this object is closed,
|
||||||
|
the underlying object is closed as well.
|
||||||
|
"""
|
||||||
|
closefd = close
|
||||||
|
self.threadpool = threadpool or get_hub().threadpool
|
||||||
|
self.lock = lock
|
||||||
|
if self.lock is True:
|
||||||
|
self.lock = Semaphore()
|
||||||
|
elif not self.lock:
|
||||||
|
self.lock = DummySemaphore()
|
||||||
|
if not hasattr(self.lock, '__enter__'):
|
||||||
|
raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
|
||||||
|
if isinstance(fobj, integer_types):
|
||||||
|
if not closefd:
|
||||||
|
# we cannot do this, since fdopen object will close the descriptor
|
||||||
|
raise TypeError('FileObjectThread does not support close=False on an fd.')
|
||||||
|
if mode is None:
|
||||||
|
assert bufsize == -1, "If you use the default mode, you can't choose a bufsize"
|
||||||
|
fobj = os.fdopen(fobj)
|
||||||
|
else:
|
||||||
|
fobj = os.fdopen(fobj, mode, bufsize)
|
||||||
|
|
||||||
|
self.__io_holder = [fobj] # signal for _wrap_method
|
||||||
|
super(FileObjectThread, self).__init__(fobj, closefd)
|
||||||
|
|
||||||
|
def _do_close(self, fobj, closefd):
|
||||||
|
self.__io_holder[0] = None # for _wrap_method
|
||||||
|
try:
|
||||||
|
with self.lock:
|
||||||
|
self.threadpool.apply(fobj.flush)
|
||||||
|
finally:
|
||||||
|
if closefd:
|
||||||
|
# Note that we're not taking the lock; older code
|
||||||
|
# did fobj.close() without going through the threadpool at all,
|
||||||
|
# so acquiring the lock could potentially introduce deadlocks
|
||||||
|
# that weren't present before. Avoiding the lock doesn't make
|
||||||
|
# the existing race condition any worse.
|
||||||
|
# We wrap the close in an exception handler and re-raise directly
|
||||||
|
# to avoid the (common, expected) IOError from being logged by the pool
|
||||||
|
def close():
|
||||||
|
try:
|
||||||
|
fobj.close()
|
||||||
|
except: # pylint:disable=bare-except
|
||||||
|
return sys.exc_info()
|
||||||
|
exc_info = self.threadpool.apply(close)
|
||||||
|
if exc_info:
|
||||||
|
reraise(*exc_info)
|
||||||
|
|
||||||
|
def _do_delegate_methods(self):
|
||||||
|
super(FileObjectThread, self)._do_delegate_methods()
|
||||||
|
if not hasattr(self, 'read1') and 'r' in getattr(self._io, 'mode', ''):
|
||||||
|
self.read1 = self.read
|
||||||
|
self.__io_holder[0] = self._io
|
||||||
|
|
||||||
|
def _extra_repr(self):
|
||||||
|
return ' threadpool=%r' % (self.threadpool,)
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
return self
|
||||||
|
|
||||||
|
def next(self):
|
||||||
|
line = self.readline()
|
||||||
|
if line:
|
||||||
|
return line
|
||||||
|
raise StopIteration
|
||||||
|
__next__ = next
|
||||||
|
|
||||||
|
def _wrap_method(self, method):
|
||||||
|
# NOTE: We are careful to avoid introducing a refcycle
|
||||||
|
# within self. Our wrapper cannot refer to self.
|
||||||
|
io_holder = self.__io_holder
|
||||||
|
lock = self.lock
|
||||||
|
threadpool = self.threadpool
|
||||||
|
|
||||||
|
@functools.wraps(method)
|
||||||
|
def thread_method(*args, **kwargs):
|
||||||
|
if io_holder[0] is None:
|
||||||
|
# This is different than FileObjectPosix, etc,
|
||||||
|
# because we want to save the expensive trip through
|
||||||
|
# the threadpool.
|
||||||
|
raise FileObjectClosed()
|
||||||
|
with lock:
|
||||||
|
return threadpool.apply(method, args, kwargs)
|
||||||
|
|
||||||
|
return thread_method
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
FileObject = FileObjectPosix
|
||||||
|
except NameError:
|
||||||
|
FileObject = FileObjectThread
|
||||||
|
|
||||||
|
|
||||||
|
class FileObjectBlock(FileObjectBase):
|
||||||
|
|
||||||
|
def __init__(self, fobj, *args, **kwargs):
|
||||||
|
closefd = kwargs.pop('close', True)
|
||||||
|
if kwargs:
|
||||||
|
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
|
||||||
|
if isinstance(fobj, integer_types):
|
||||||
|
if not closefd:
|
||||||
|
# we cannot do this, since fdopen object will close the descriptor
|
||||||
|
raise TypeError('FileObjectBlock does not support close=False on an fd.')
|
||||||
|
fobj = os.fdopen(fobj, *args)
|
||||||
|
super(FileObjectBlock, self).__init__(fobj, closefd)
|
||||||
|
|
||||||
|
def _do_close(self, fobj, closefd):
|
||||||
|
fobj.close()
|
||||||
|
|
||||||
|
config = os.environ.get('GEVENT_FILE')
|
||||||
|
if config:
|
||||||
|
klass = {'thread': 'gevent.fileobject.FileObjectThread',
|
||||||
|
'posix': 'gevent.fileobject.FileObjectPosix',
|
||||||
|
'block': 'gevent.fileobject.FileObjectBlock'}.get(config, config)
|
||||||
|
if klass.startswith('gevent.fileobject.'):
|
||||||
|
FileObject = globals()[klass.split('.', 2)[-1]]
|
||||||
|
else:
|
||||||
|
from gevent.hub import _import
|
||||||
|
FileObject = _import(klass)
|
||||||
|
del klass
|
||||||
8807
python/gevent/gevent._semaphore.c
Normal file
8807
python/gevent/gevent._semaphore.c
Normal file
File diff suppressed because it is too large
Load Diff
14103
python/gevent/gevent.ares.c
Normal file
14103
python/gevent/gevent.ares.c
Normal file
File diff suppressed because it is too large
Load Diff
48
python/gevent/gevent.ares.h
Normal file
48
python/gevent/gevent.ares.h
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
/* Generated by Cython 0.25.2 */
|
||||||
|
|
||||||
|
#ifndef __PYX_HAVE__gevent__ares
|
||||||
|
#define __PYX_HAVE__gevent__ares
|
||||||
|
|
||||||
|
struct PyGeventAresChannelObject;
|
||||||
|
|
||||||
|
/* "gevent/ares.pyx":245
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* cdef public class channel [object PyGeventAresChannelObject, type PyGeventAresChannel_Type]: # <<<<<<<<<<<<<<
|
||||||
|
*
|
||||||
|
* cdef public object loop
|
||||||
|
*/
|
||||||
|
struct PyGeventAresChannelObject {
|
||||||
|
PyObject_HEAD
|
||||||
|
struct __pyx_vtabstruct_6gevent_4ares_channel *__pyx_vtab;
|
||||||
|
PyObject *loop;
|
||||||
|
struct ares_channeldata *channel;
|
||||||
|
PyObject *_watchers;
|
||||||
|
PyObject *_timer;
|
||||||
|
};
|
||||||
|
|
||||||
|
#ifndef __PYX_HAVE_API__gevent__ares
|
||||||
|
|
||||||
|
#ifndef __PYX_EXTERN_C
|
||||||
|
#ifdef __cplusplus
|
||||||
|
#define __PYX_EXTERN_C extern "C"
|
||||||
|
#else
|
||||||
|
#define __PYX_EXTERN_C extern
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef DL_IMPORT
|
||||||
|
#define DL_IMPORT(_T) _T
|
||||||
|
#endif
|
||||||
|
|
||||||
|
__PYX_EXTERN_C DL_IMPORT(PyTypeObject) PyGeventAresChannel_Type;
|
||||||
|
|
||||||
|
#endif /* !__PYX_HAVE_API__gevent__ares */
|
||||||
|
|
||||||
|
#if PY_MAJOR_VERSION < 3
|
||||||
|
PyMODINIT_FUNC initares(void);
|
||||||
|
#else
|
||||||
|
PyMODINIT_FUNC PyInit_ares(void);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif /* !__PYX_HAVE__gevent__ares */
|
||||||
744
python/gevent/greenlet.py
Normal file
744
python/gevent/greenlet.py
Normal file
@@ -0,0 +1,744 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import sys
|
||||||
|
from greenlet import greenlet
|
||||||
|
from gevent._compat import PY3
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent._compat import reraise
|
||||||
|
from gevent._util import Lazy
|
||||||
|
from gevent._tblib import dump_traceback
|
||||||
|
from gevent._tblib import load_traceback
|
||||||
|
from gevent.hub import GreenletExit
|
||||||
|
from gevent.hub import InvalidSwitchError
|
||||||
|
from gevent.hub import Waiter
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent.hub import getcurrent
|
||||||
|
from gevent.hub import iwait
|
||||||
|
from gevent.hub import wait
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
from collections import deque
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Greenlet',
|
||||||
|
'joinall',
|
||||||
|
'killall',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
if PYPY:
|
||||||
|
import _continuation # pylint:disable=import-error
|
||||||
|
_continulet = _continuation.continulet
|
||||||
|
|
||||||
|
|
||||||
|
class SpawnedLink(object):
|
||||||
|
"""A wrapper around link that calls it in another greenlet.
|
||||||
|
|
||||||
|
Can be called only from main loop.
|
||||||
|
"""
|
||||||
|
__slots__ = ['callback']
|
||||||
|
|
||||||
|
def __init__(self, callback):
|
||||||
|
if not callable(callback):
|
||||||
|
raise TypeError("Expected callable: %r" % (callback, ))
|
||||||
|
self.callback = callback
|
||||||
|
|
||||||
|
def __call__(self, source):
|
||||||
|
g = greenlet(self.callback, get_hub())
|
||||||
|
g.switch(source)
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash(self.callback)
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
return self.callback == getattr(other, 'callback', other)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return str(self.callback)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return repr(self.callback)
|
||||||
|
|
||||||
|
def __getattr__(self, item):
|
||||||
|
assert item != 'callback'
|
||||||
|
return getattr(self.callback, item)
|
||||||
|
|
||||||
|
|
||||||
|
class SuccessSpawnedLink(SpawnedLink):
|
||||||
|
"""A wrapper around link that calls it in another greenlet only if source succeed.
|
||||||
|
|
||||||
|
Can be called only from main loop.
|
||||||
|
"""
|
||||||
|
__slots__ = []
|
||||||
|
|
||||||
|
def __call__(self, source):
|
||||||
|
if source.successful():
|
||||||
|
return SpawnedLink.__call__(self, source)
|
||||||
|
|
||||||
|
|
||||||
|
class FailureSpawnedLink(SpawnedLink):
|
||||||
|
"""A wrapper around link that calls it in another greenlet only if source failed.
|
||||||
|
|
||||||
|
Can be called only from main loop.
|
||||||
|
"""
|
||||||
|
__slots__ = []
|
||||||
|
|
||||||
|
def __call__(self, source):
|
||||||
|
if not source.successful():
|
||||||
|
return SpawnedLink.__call__(self, source)
|
||||||
|
|
||||||
|
class Greenlet(greenlet):
|
||||||
|
"""A light-weight cooperatively-scheduled execution unit.
|
||||||
|
"""
|
||||||
|
# pylint:disable=too-many-public-methods,too-many-instance-attributes
|
||||||
|
|
||||||
|
value = None
|
||||||
|
_exc_info = ()
|
||||||
|
_notifier = None
|
||||||
|
|
||||||
|
#: An event, such as a timer or a callback that fires. It is established in
|
||||||
|
#: start() and start_later() as those two objects, respectively.
|
||||||
|
#: Once this becomes non-None, the Greenlet cannot be started again. Conversely,
|
||||||
|
#: kill() and throw() check for non-None to determine if this object has ever been
|
||||||
|
#: scheduled for starting. A placeholder _dummy_event is assigned by them to prevent
|
||||||
|
#: the greenlet from being started in the future, if necessary.
|
||||||
|
_start_event = None
|
||||||
|
args = ()
|
||||||
|
_kwargs = None
|
||||||
|
|
||||||
|
def __init__(self, run=None, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Greenlet constructor.
|
||||||
|
|
||||||
|
:param args: The arguments passed to the ``run`` function.
|
||||||
|
:param kwargs: The keyword arguments passed to the ``run`` function.
|
||||||
|
:keyword run: The callable object to run. If not given, this object's
|
||||||
|
`_run` method will be invoked (typically defined by subclasses).
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1b1
|
||||||
|
The ``run`` argument to the constructor is now verified to be a callable
|
||||||
|
object. Previously, passing a non-callable object would fail after the greenlet
|
||||||
|
was spawned.
|
||||||
|
"""
|
||||||
|
# greenlet.greenlet(run=None, parent=None)
|
||||||
|
# Calling it with both positional arguments instead of a keyword
|
||||||
|
# argument (parent=get_hub()) speeds up creation of this object ~30%:
|
||||||
|
# python -m timeit -s 'import gevent' 'gevent.Greenlet()'
|
||||||
|
# Python 3.5: 2.70usec with keywords vs 1.94usec with positional
|
||||||
|
# Python 3.4: 2.32usec with keywords vs 1.74usec with positional
|
||||||
|
# Python 3.3: 2.55usec with keywords vs 1.92usec with positional
|
||||||
|
# Python 2.7: 1.73usec with keywords vs 1.40usec with positional
|
||||||
|
greenlet.__init__(self, None, get_hub())
|
||||||
|
|
||||||
|
if run is not None:
|
||||||
|
self._run = run
|
||||||
|
|
||||||
|
# If they didn't pass a callable at all, then they must
|
||||||
|
# already have one. Note that subclassing to override the run() method
|
||||||
|
# itself has never been documented or supported.
|
||||||
|
if not callable(self._run):
|
||||||
|
raise TypeError("The run argument or self._run must be callable")
|
||||||
|
|
||||||
|
if args:
|
||||||
|
self.args = args
|
||||||
|
if kwargs:
|
||||||
|
self._kwargs = kwargs
|
||||||
|
|
||||||
|
@property
|
||||||
|
def kwargs(self):
|
||||||
|
return self._kwargs or {}
|
||||||
|
|
||||||
|
@Lazy
|
||||||
|
def _links(self):
|
||||||
|
return deque()
|
||||||
|
|
||||||
|
def _has_links(self):
|
||||||
|
return '_links' in self.__dict__ and self._links
|
||||||
|
|
||||||
|
def _raise_exception(self):
|
||||||
|
reraise(*self.exc_info)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def loop(self):
|
||||||
|
# needed by killall
|
||||||
|
return self.parent.loop
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
return self._start_event is not None and self._exc_info is Greenlet._exc_info
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
### Lifecycle
|
||||||
|
|
||||||
|
if PYPY:
|
||||||
|
# oops - pypy's .dead relies on __nonzero__ which we overriden above
|
||||||
|
@property
|
||||||
|
def dead(self):
|
||||||
|
if self._greenlet__main:
|
||||||
|
return False
|
||||||
|
if self.__start_cancelled_by_kill or self.__started_but_aborted:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return self._greenlet__started and not _continulet.is_pending(self)
|
||||||
|
else:
|
||||||
|
@property
|
||||||
|
def dead(self):
|
||||||
|
return self.__start_cancelled_by_kill or self.__started_but_aborted or greenlet.dead.__get__(self)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def __never_started_or_killed(self):
|
||||||
|
return self._start_event is None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def __start_pending(self):
|
||||||
|
return (self._start_event is not None
|
||||||
|
and (self._start_event.pending or getattr(self._start_event, 'active', False)))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def __start_cancelled_by_kill(self):
|
||||||
|
return self._start_event is _cancelled_start_event
|
||||||
|
|
||||||
|
@property
|
||||||
|
def __start_completed(self):
|
||||||
|
return self._start_event is _start_completed_event
|
||||||
|
|
||||||
|
@property
|
||||||
|
def __started_but_aborted(self):
|
||||||
|
return (not self.__never_started_or_killed # we have been started or killed
|
||||||
|
and not self.__start_cancelled_by_kill # we weren't killed, so we must have been started
|
||||||
|
and not self.__start_completed # the start never completed
|
||||||
|
and not self.__start_pending) # and we're not pending, so we must have been aborted
|
||||||
|
|
||||||
|
def __cancel_start(self):
|
||||||
|
if self._start_event is None:
|
||||||
|
# prevent self from ever being started in the future
|
||||||
|
self._start_event = _cancelled_start_event
|
||||||
|
# cancel any pending start event
|
||||||
|
# NOTE: If this was a real pending start event, this will leave a
|
||||||
|
# "dangling" callback/timer object in the hub.loop.callbacks list;
|
||||||
|
# depending on where we are in the event loop, it may even be in a local
|
||||||
|
# variable copy of that list (in _run_callbacks). This isn't a problem,
|
||||||
|
# except for the leak-tests.
|
||||||
|
self._start_event.stop()
|
||||||
|
|
||||||
|
def __handle_death_before_start(self, *args):
|
||||||
|
# args is (t, v, tb) or simply t or v
|
||||||
|
if self._exc_info is Greenlet._exc_info and self.dead:
|
||||||
|
# the greenlet was never switched to before and it will never be, _report_error was not called
|
||||||
|
# the result was not set and the links weren't notified. let's do it here.
|
||||||
|
# checking that self.dead is true is essential, because throw() does not necessarily kill the greenlet
|
||||||
|
# (if the exception raised by throw() is caught somewhere inside the greenlet).
|
||||||
|
if len(args) == 1:
|
||||||
|
arg = args[0]
|
||||||
|
#if isinstance(arg, type):
|
||||||
|
if type(arg) is type(Exception):
|
||||||
|
args = (arg, arg(), None)
|
||||||
|
else:
|
||||||
|
args = (type(arg), arg, None)
|
||||||
|
elif not args:
|
||||||
|
args = (GreenletExit, GreenletExit(), None)
|
||||||
|
self._report_error(args)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def started(self):
|
||||||
|
# DEPRECATED
|
||||||
|
return bool(self)
|
||||||
|
|
||||||
|
def ready(self):
|
||||||
|
"""
|
||||||
|
Return a true value if and only if the greenlet has finished
|
||||||
|
execution.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1
|
||||||
|
This function is only guaranteed to return true or false *values*, not
|
||||||
|
necessarily the literal constants ``True`` or ``False``.
|
||||||
|
"""
|
||||||
|
return self.dead or self._exc_info
|
||||||
|
|
||||||
|
def successful(self):
|
||||||
|
"""
|
||||||
|
Return a true value if and only if the greenlet has finished execution
|
||||||
|
successfully, that is, without raising an error.
|
||||||
|
|
||||||
|
.. tip:: A greenlet that has been killed with the default
|
||||||
|
:class:`GreenletExit` exception is considered successful.
|
||||||
|
That is, ``GreenletExit`` is not considered an error.
|
||||||
|
|
||||||
|
.. note:: This function is only guaranteed to return true or false *values*,
|
||||||
|
not necessarily the literal constants ``True`` or ``False``.
|
||||||
|
"""
|
||||||
|
return self._exc_info and self._exc_info[1] is None
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
classname = self.__class__.__name__
|
||||||
|
result = '<%s at %s' % (classname, hex(id(self)))
|
||||||
|
formatted = self._formatinfo()
|
||||||
|
if formatted:
|
||||||
|
result += ': ' + formatted
|
||||||
|
return result + '>'
|
||||||
|
|
||||||
|
_formatted_info = None
|
||||||
|
|
||||||
|
def _formatinfo(self):
|
||||||
|
info = self._formatted_info
|
||||||
|
if info is not None:
|
||||||
|
return info
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = getfuncname(self.__dict__['_run'])
|
||||||
|
except Exception: # pylint:disable=broad-except
|
||||||
|
# Don't cache
|
||||||
|
return ''
|
||||||
|
|
||||||
|
args = []
|
||||||
|
if self.args:
|
||||||
|
args = [repr(x)[:50] for x in self.args]
|
||||||
|
if self._kwargs:
|
||||||
|
args.extend(['%s=%s' % (key, repr(value)[:50]) for (key, value) in self._kwargs.items()])
|
||||||
|
if args:
|
||||||
|
result += '(' + ', '.join(args) + ')'
|
||||||
|
# it is important to save the result here, because once the greenlet exits '_run' attribute will be removed
|
||||||
|
self._formatted_info = result
|
||||||
|
return result
|
||||||
|
|
||||||
|
@property
|
||||||
|
def exception(self):
|
||||||
|
"""Holds the exception instance raised by the function if the greenlet has finished with an error.
|
||||||
|
Otherwise ``None``.
|
||||||
|
"""
|
||||||
|
return self._exc_info[1] if self._exc_info else None
|
||||||
|
|
||||||
|
@property
|
||||||
|
def exc_info(self):
|
||||||
|
"""
|
||||||
|
Holds the exc_info three-tuple raised by the function if the
|
||||||
|
greenlet finished with an error. Otherwise a false value.
|
||||||
|
|
||||||
|
.. note:: This is a provisional API and may change.
|
||||||
|
|
||||||
|
.. versionadded:: 1.1
|
||||||
|
"""
|
||||||
|
e = self._exc_info
|
||||||
|
if e and e[0] is not None:
|
||||||
|
return (e[0], e[1], load_traceback(e[2]))
|
||||||
|
|
||||||
|
def throw(self, *args):
|
||||||
|
"""Immediatelly switch into the greenlet and raise an exception in it.
|
||||||
|
|
||||||
|
Should only be called from the HUB, otherwise the current greenlet is left unscheduled forever.
|
||||||
|
To raise an exception in a safe manner from any greenlet, use :meth:`kill`.
|
||||||
|
|
||||||
|
If a greenlet was started but never switched to yet, then also
|
||||||
|
a) cancel the event that will start it
|
||||||
|
b) fire the notifications as if an exception was raised in a greenlet
|
||||||
|
"""
|
||||||
|
self.__cancel_start()
|
||||||
|
|
||||||
|
try:
|
||||||
|
if not self.dead:
|
||||||
|
# Prevent switching into a greenlet *at all* if we had never
|
||||||
|
# started it. Usually this is the same thing that happens by throwing,
|
||||||
|
# but if this is done from the hub with nothing else running, prevents a
|
||||||
|
# LoopExit.
|
||||||
|
greenlet.throw(self, *args)
|
||||||
|
finally:
|
||||||
|
self.__handle_death_before_start(*args)
|
||||||
|
|
||||||
|
def start(self):
|
||||||
|
"""Schedule the greenlet to run in this loop iteration"""
|
||||||
|
if self._start_event is None:
|
||||||
|
self._start_event = self.parent.loop.run_callback(self.switch)
|
||||||
|
|
||||||
|
def start_later(self, seconds):
|
||||||
|
"""Schedule the greenlet to run in the future loop iteration *seconds* later"""
|
||||||
|
if self._start_event is None:
|
||||||
|
self._start_event = self.parent.loop.timer(seconds)
|
||||||
|
self._start_event.start(self.switch)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def spawn(cls, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Create a new :class:`Greenlet` object and schedule it to run ``function(*args, **kwargs)``.
|
||||||
|
This can be used as ``gevent.spawn`` or ``Greenlet.spawn``.
|
||||||
|
|
||||||
|
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1b1
|
||||||
|
If a *function* is given that is not callable, immediately raise a :exc:`TypeError`
|
||||||
|
instead of spawning a greenlet that will raise an uncaught TypeError.
|
||||||
|
"""
|
||||||
|
g = cls(*args, **kwargs)
|
||||||
|
g.start()
|
||||||
|
return g
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def spawn_later(cls, seconds, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Create and return a new Greenlet object scheduled to run ``function(*args, **kwargs)``
|
||||||
|
in the future loop iteration *seconds* later. This can be used as ``Greenlet.spawn_later``
|
||||||
|
or ``gevent.spawn_later``.
|
||||||
|
|
||||||
|
The arguments are passed to :meth:`Greenlet.__init__`.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1b1
|
||||||
|
If an argument that's meant to be a function (the first argument in *args*, or the ``run`` keyword )
|
||||||
|
is given to this classmethod (and not a classmethod of a subclass),
|
||||||
|
it is verified to be callable. Previously, the spawned greenlet would have failed
|
||||||
|
when it started running.
|
||||||
|
"""
|
||||||
|
if cls is Greenlet and not args and 'run' not in kwargs:
|
||||||
|
raise TypeError("")
|
||||||
|
g = cls(*args, **kwargs)
|
||||||
|
g.start_later(seconds)
|
||||||
|
return g
|
||||||
|
|
||||||
|
def kill(self, exception=GreenletExit, block=True, timeout=None):
    """
    Raise the ``exception`` in the greenlet.

    If ``block`` is ``True`` (the default), wait until the greenlet dies or the optional timeout expires.
    If block is ``False``, the current greenlet is not unscheduled.

    The function always returns ``None`` and never raises an error.

    .. note::

        Depending on what this greenlet is executing and the state
        of the event loop, the exception may or may not be raised
        immediately when this greenlet resumes execution. It may
        be raised on a subsequent green call, or, if this greenlet
        exits before making such a call, it may not be raised at
        all. As of 1.1, an example where the exception is raised
        later is if this greenlet had called :func:`sleep(0)
        <gevent.sleep>`; an example where the exception is raised
        immediately is if this greenlet had called
        :func:`sleep(0.1) <gevent.sleep>`.

    .. caution::

        Use care when killing greenlets. If the code executing is not
        exception safe (e.g., makes proper use of ``finally``) then an
        unexpected exception could result in corrupted state.

    See also :func:`gevent.kill`.

    :keyword type exception: The type of exception to raise in the greenlet. The default
        is :class:`GreenletExit`, which indicates a :meth:`successful` completion
        of the greenlet.

    .. versionchanged:: 0.13.0
        *block* is now ``True`` by default.
    .. versionchanged:: 1.1a2
        If this greenlet had never been switched to, killing it will prevent it from ever being switched to.
    """
    # Cancel any pending start()/start_later() so an unstarted greenlet
    # can never be switched to after being killed (see 1.1a2 note above).
    self.__cancel_start()

    if self.dead:
        # Already dead (finished, or start cancelled above): record the
        # death without throwing anything.
        self.__handle_death_before_start(exception)
    else:
        # Ask the hub to throw `exception` into this greenlet. The Waiter
        # is only needed when the caller wants to block until the throw
        # has actually been delivered.
        waiter = Waiter() if block else None
        self.parent.loop.run_callback(_kill, self, exception, waiter)
        if block:
            waiter.get()
            self.join(timeout)
    # it should be OK to use kill() in finally or kill a greenlet from more than one place;
    # thus it should not raise when the greenlet is already killed (= not started)
|
||||||
|
|
||||||
|
def get(self, block=True, timeout=None):
    """Return the result the greenlet has returned or re-raise the exception it has raised.

    If block is ``False``, raise :class:`gevent.Timeout` if the greenlet is still alive.
    If block is ``True``, unschedule the current greenlet until the result is available
    or the timeout expires. In the latter case, :class:`gevent.Timeout` is raised.
    """
    # Fast path: already finished — return or re-raise without blocking.
    if self.ready():
        if self.successful():
            return self.value
        self._raise_exception()
    if not block:
        raise Timeout()

    # Link our own switch so _notify_links wakes us when this greenlet dies,
    # then yield to the hub (self.parent) until that happens.
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        t = Timeout._start_new_or_dummy(timeout)
        try:
            result = self.parent.switch()
            # The only valid way back here is via the link firing with `self`.
            if result is not self:
                raise InvalidSwitchError('Invalid switch into Greenlet.get(): %r' % (result, ))
        finally:
            t.cancel()
    except:
        # unlinking in 'except' instead of finally is an optimization:
        # if switch occurred normally then link was already removed in _notify_links
        # and there's no need to touch the links set.
        # Note, however, that if "Invalid switch" assert was removed and invalid switch
        # did happen, the link would remain, causing another invalid switch later in this greenlet.
        self.unlink(switch)
        raise

    # Woken because the greenlet finished: deliver its value or exception.
    if self.ready():
        if self.successful():
            return self.value
        self._raise_exception()
|
||||||
|
|
||||||
|
def join(self, timeout=None):
    """Wait until the greenlet finishes or *timeout* expires.
    Return ``None`` regardless.
    """
    if self.ready():
        return

    # Same link-and-switch protocol as get(), but a timeout here is
    # swallowed (join never raises on expiry of its own timer).
    switch = getcurrent().switch
    self.rawlink(switch)
    try:
        t = Timeout._start_new_or_dummy(timeout)
        try:
            result = self.parent.switch()
            if result is not self:
                raise InvalidSwitchError('Invalid switch into Greenlet.join(): %r' % (result, ))
        finally:
            t.cancel()
    except Timeout as ex:
        self.unlink(switch)
        # Only suppress *our* timeout; any other Timeout belongs to the caller.
        if ex is not t:
            raise
    except:
        self.unlink(switch)
        raise
|
||||||
|
|
||||||
|
def _report_result(self, result):
    """Record successful completion with *result* and schedule link notification."""
    self.value = result
    self._exc_info = (None, None, None)
    # Notify links from the hub, and only once (guarded by _notifier).
    if self._has_links() and not self._notifier:
        self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||||
|
|
||||||
|
def _report_error(self, exc_info):
    # A GreenletExit is not an error: treat it as a successful result
    # whose value is the exception instance itself.
    if isinstance(exc_info[1], GreenletExit):
        self._report_result(exc_info[1])
        return

    # Store a dumped (picklable/detached) traceback rather than the live one.
    self._exc_info = exc_info[0], exc_info[1], dump_traceback(exc_info[2])

    if self._has_links() and not self._notifier:
        self._notifier = self.parent.loop.run_callback(self._notify_links)

    try:
        self.parent.handle_error(self, *exc_info)
    finally:
        # Drop the local reference to the traceback to avoid a reference cycle.
        del exc_info
|
||||||
|
|
||||||
|
def run(self):
    # Entry point executed by the raw greenlet machinery when this
    # greenlet is first switched to.
    try:
        # We are running now, so any pending start event is moot.
        self.__cancel_start()
        self._start_event = _start_completed_event

        try:
            result = self._run(*self.args, **self.kwargs)
        except: # pylint:disable=bare-except
            self._report_error(sys.exc_info())
            return
        self._report_result(result)
    finally:
        # Break reference cycles / free the callable and its arguments
        # as soon as the greenlet is done with them.
        self.__dict__.pop('_run', None)
        self.__dict__.pop('args', None)
        self.__dict__.pop('kwargs', None)
|
||||||
|
|
||||||
|
def _run(self):
|
||||||
|
"""Subclasses may override this method to take any number of arguments and keyword arguments.
|
||||||
|
|
||||||
|
.. versionadded:: 1.1a3
|
||||||
|
Previously, if no callable object was passed to the constructor, the spawned greenlet would
|
||||||
|
later fail with an AttributeError.
|
||||||
|
"""
|
||||||
|
# We usually override this in __init__
|
||||||
|
# pylint: disable=method-hidden
|
||||||
|
return
|
||||||
|
|
||||||
|
def rawlink(self, callback):
    """Register a callable to be executed when the greenlet finishes execution.

    The *callback* will be called with this instance as an argument.

    .. caution:: The callable will be called in the HUB greenlet.
    """
    if not callable(callback):
        raise TypeError('Expected callable: %r' % (callback, ))
    links = self._links
    links.append(callback) # pylint:disable=no-member
    # If we already finished, arrange for the hub to fire the links now.
    if self.ready() and links and not self._notifier:
        self._notifier = self.parent.loop.run_callback(self._notify_links)
|
||||||
|
|
||||||
|
def link(self, callback, SpawnedLink=SpawnedLink):
    """
    Link greenlet's completion to a callable.

    The *callback* will be called with this instance as an
    argument once this greenlet is dead. A callable is called in
    its own :class:`greenlet.greenlet` (*not* a
    :class:`Greenlet`).
    """
    # XXX: Is the redefinition of SpawnedLink supposed to just be an
    # optimization, or do people use it? It's not documented
    # pylint:disable=redefined-outer-name
    # Wrap the callback so it runs in its own greenlet instead of the hub.
    wrapped = SpawnedLink(callback)
    self.rawlink(wrapped)
|
||||||
|
|
||||||
|
def unlink(self, callback):
    """Remove the callback set by :meth:`link` or :meth:`rawlink`"""
    try:
        self._links.remove(callback) # pylint:disable=no-member
    except ValueError:
        # Removing an unknown (or already-fired) callback is a no-op.
        pass
|
||||||
|
|
||||||
|
def link_value(self, callback, SpawnedLink=SuccessSpawnedLink):
    """
    Like :meth:`link` but *callback* is only notified when the greenlet
    has completed successfully.
    """
    # pylint:disable=redefined-outer-name
    # Delegates to link() with a link class that only fires on success.
    self.link(callback, SpawnedLink=SpawnedLink)
|
||||||
|
|
||||||
|
def link_exception(self, callback, SpawnedLink=FailureSpawnedLink):
    """Like :meth:`link` but *callback* is only notified when the greenlet dies because of an unhandled exception."""
    # pylint:disable=redefined-outer-name
    # Delegates to link() with a link class that only fires on failure.
    self.link(callback, SpawnedLink=SpawnedLink)
|
||||||
|
|
||||||
|
def _notify_links(self):
    # Runs in the hub. Drain the link queue in FIFO order; links added
    # while we run (e.g. by a link itself) are also processed.
    while self._links:
        link = self._links.popleft() # pylint:disable=no-member
        try:
            link(self)
        except: # pylint:disable=bare-except
            # A failing link must not prevent the remaining links from firing.
            self.parent.handle_error((link, self), *sys.exc_info())
|
||||||
|
|
||||||
|
|
||||||
|
class _dummy_event(object):
|
||||||
|
pending = False
|
||||||
|
active = False
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def start(self, cb): # pylint:disable=unused-argument
|
||||||
|
raise AssertionError("Cannot start the dummy event")
|
||||||
|
|
||||||
|
|
||||||
|
# Shared inert sentinels: a greenlet's _start_event is replaced with one of
# these once its start has been cancelled or has completed, so that later
# stop()/cancel calls are harmless no-ops. The class itself is deleted to
# keep the module namespace clean.
_cancelled_start_event = _dummy_event()
_start_completed_event = _dummy_event()
del _dummy_event
|
||||||
|
|
||||||
|
|
||||||
|
def _kill(glet, exception, waiter):
    # Hub callback used by Greenlet.kill(): throw `exception` into `glet`,
    # then wake the (optional) waiter so a blocking kill() can proceed.
    try:
        glet.throw(exception)
    except: # pylint:disable=bare-except
        # XXX do we need this here?
        glet.parent.handle_error(glet, *sys.exc_info())
    if waiter is not None:
        waiter.switch()
|
||||||
|
|
||||||
|
|
||||||
|
def joinall(greenlets, timeout=None, raise_error=False, count=None):
    """
    Wait for the ``greenlets`` to finish.

    :param greenlets: A sequence (supporting :func:`len`) of greenlets to wait for.
    :keyword float timeout: If given, the maximum number of seconds to wait.
    :return: A sequence of the greenlets that finished before the timeout (if any)
        expired.
    """
    # Without error propagation this is just a plain wait().
    if not raise_error:
        return wait(greenlets, timeout=timeout, count=count)

    # Otherwise iterate as objects become ready, re-raising the first failure.
    finished = []
    for glet in iwait(greenlets, timeout=timeout, count=count):
        if getattr(glet, 'exception', None) is not None:
            raiser = getattr(glet, '_raise_exception', None)
            if raiser is not None:
                raiser()
            else:
                raise glet.exception
        finished.append(glet)
    return finished
|
||||||
|
|
||||||
|
|
||||||
|
def _killall3(greenlets, exception, waiter):
    """Hub callback for blocking killall(): throw *exception* into each live
    greenlet, then hand the survivors ("diehards") back via *waiter*."""
    diehards = []
    for victim in greenlets:
        if victim.dead:
            continue
        try:
            victim.throw(exception)
        except: # pylint:disable=bare-except
            victim.parent.handle_error(victim, *sys.exc_info())
        # A greenlet may catch the exception and keep running.
        if not victim.dead:
            diehards.append(victim)
    waiter.switch(diehards)
|
||||||
|
|
||||||
|
|
||||||
|
def _killall(greenlets, exception):
    """Hub callback for non-blocking killall(): best-effort throw of
    *exception* into each greenlet that is still alive."""
    for victim in greenlets:
        if victim.dead:
            continue
        try:
            victim.throw(exception)
        except: # pylint:disable=bare-except
            victim.parent.handle_error(victim, *sys.exc_info())
|
||||||
|
|
||||||
|
|
||||||
|
def killall(greenlets, exception=GreenletExit, block=True, timeout=None):
    """
    Forceably terminate all the ``greenlets`` by causing them to raise ``exception``.

    .. caution:: Use care when killing greenlets. If they are not prepared for exceptions,
       this could result in corrupted state.

    :param greenlets: A **bounded** iterable of the non-None greenlets to terminate.
       *All* the items in this iterable must be greenlets that belong to the same thread.
    :keyword exception: The exception to raise in the greenlets. By default this is
        :class:`GreenletExit`.
    :keyword bool block: If True (the default) then this function only returns when all the
        greenlets are dead; the current greenlet is unscheduled during that process.
        If greenlets ignore the initial exception raised in them,
        then they will be joined (with :func:`gevent.joinall`) and allowed to die naturally.
        If False, this function returns immediately and greenlets will raise
        the exception asynchronously.
    :keyword float timeout: A time in seconds to wait for greenlets to die. If given, it is
        only honored when ``block`` is True.
    :raise Timeout: If blocking and a timeout is given that elapses before
        all the greenlets are dead.

    .. versionchanged:: 1.1a2
        *greenlets* can be any iterable of greenlets, like an iterator or a set.
        Previously it had to be a list or tuple.
    """
    # support non-indexable containers like iterators or set objects
    greenlets = list(greenlets)
    if not greenlets:
        return
    # All greenlets are required to share one thread, so any one's loop will do.
    loop = greenlets[0].loop
    if block:
        # First pass throws the exception from the hub; greenlets that
        # survive it ("diehards") are then joined until dead or timeout.
        waiter = Waiter()
        loop.run_callback(_killall3, greenlets, exception, waiter)
        t = Timeout._start_new_or_dummy(timeout)
        try:
            alive = waiter.get()
            if alive:
                joinall(alive, raise_error=False)
        finally:
            t.cancel()
    else:
        # Fire-and-forget: exceptions are raised asynchronously by the hub.
        loop.run_callback(_killall, greenlets, exception)
|
||||||
|
|
||||||
|
|
||||||
|
# Attribute holding the bound instance of a method: ``__self__`` on
# Python 3, ``im_self`` on Python 2. Used by getfuncname() below to
# detect bound methods.
if PY3:
    _meth_self = "__self__"
else:
    _meth_self = "im_self"
|
||||||
|
|
||||||
|
|
||||||
|
def getfuncname(func):
    """Return a short display name for *func*.

    Plain functions use ``__name__``; bound methods, lambdas and objects
    without a useful name fall back to ``repr(func)``.
    """
    missing = object()
    if not hasattr(func, _meth_self):
        funcname = getattr(func, '__name__', missing)
        if funcname is not missing and funcname != '<lambda>':
            return funcname
    return repr(func)
|
||||||
1052
python/gevent/hub.py
Normal file
1052
python/gevent/hub.py
Normal file
File diff suppressed because it is too large
Load Diff
0
python/gevent/libev/__init__.py
Normal file
0
python/gevent/libev/__init__.py
Normal file
75
python/gevent/libev/_corecffi_build.py
Normal file
75
python/gevent/libev/_corecffi_build.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
# pylint: disable=no-member
|
||||||
|
|
||||||
|
# This module is only used to create and compile the gevent._corecffi module;
|
||||||
|
# nothing should be directly imported from it except `ffi`, which should only be
|
||||||
|
# used for `ffi.compile()`; programs should import gevent._corecfffi.
|
||||||
|
# However, because we are using "out-of-line" mode, it is necessary to examine
|
||||||
|
# this file to know what functions are created and available on the generated
|
||||||
|
# module.
|
||||||
|
from __future__ import absolute_import, print_function
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
import os.path # pylint:disable=no-name-in-module
|
||||||
|
import struct
|
||||||
|
|
||||||
|
__all__ = []
|
||||||
|
|
||||||
|
|
||||||
|
def system_bits():
    """Return this interpreter build's pointer width in bits (32 or 64)."""
    # A C pointer ('P') packs to 4 bytes on 32-bit builds, 8 on 64-bit.
    return 8 * struct.calcsize('P')
|
||||||
|
|
||||||
|
|
||||||
|
def st_nlink_type():
    """Return the C type name used for ``st_nlink`` in ``struct stat``
    on this platform, for substitution into the cdef text."""
    platform = sys.platform
    if platform == "darwin" or platform.startswith("freebsd"):
        return "short"
    return "unsigned long" if system_bits() == 32 else "long long"
|
||||||
|
|
||||||
|
|
||||||
|
# cffi is a build-time-only requirement; at runtime gevent imports the
# generated gevent.libev._corecffi extension instead of this module.
from cffi import FFI
ffi = FFI()

# Directory containing this file and the bundled headers (libev_vfd.h etc.).
thisdir = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
def read_source(name):
    """Return the text of *name*, a file that lives next to this module."""
    path = os.path.join(thisdir, name)
    with open(path, 'r') as f:
        return f.read()
|
||||||
|
|
||||||
|
_cdef = read_source('_corecffi_cdef.c')
_source = read_source('_corecffi_source.c')

# The cdef file carries two marker macros so it can also be checked by a
# real C compiler; strip them and substitute the platform-specific pieces
# before handing the text to cffi.
_cdef = _cdef.replace('#define GEVENT_ST_NLINK_T int', '')
_cdef = _cdef.replace('#define GEVENT_STRUCT_DONE int', '')
_cdef = _cdef.replace('GEVENT_ST_NLINK_T', st_nlink_type())
_cdef = _cdef.replace("GEVENT_STRUCT_DONE _;", '...;')


if sys.platform.startswith('win'):
    # We must have the vfd_open, etc, functions on
    # Windows. But on other platforms, going through
    # CFFI to just return the file-descriptor is slower
    # than just doing it in Python, so we check for and
    # workaround their absence in corecffi.py
    _cdef += """
typedef int... vfd_socket_t;
int vfd_open(vfd_socket_t);
vfd_socket_t vfd_get(int);
void vfd_free(int);
"""



include_dirs = [
    thisdir, # libev_vfd.h
    os.path.abspath(os.path.join(thisdir, '..', '..', '..', 'deps', 'libev')),
]
ffi.cdef(_cdef)
ffi.set_source('gevent.libev._corecffi', _source, include_dirs=include_dirs)

if __name__ == '__main__':
    # XXX: Note, on Windows, we would need to specify the external libraries
    # that should be linked in, such as ws2_32 and (because libev_vfd.h makes
    # Python.h calls) the proper Python library---at least for PyPy. I never got
    # that to work though, and calling python functions is strongly discouraged
    # from CFFI code.
    ffi.compile()
|
||||||
226
python/gevent/libev/_corecffi_cdef.c
Normal file
226
python/gevent/libev/_corecffi_cdef.c
Normal file
@@ -0,0 +1,226 @@
|
|||||||
|
/* libev interface */
/* CFFI cdef: declarations parsed by cffi in out-of-line mode. `...` values
 * are filled in by the real C compiler at build time; see
 * _corecffi_build.py for the marker-macro substitutions. */

#define EV_MINPRI ...
#define EV_MAXPRI ...

#define EV_VERSION_MAJOR ...
#define EV_VERSION_MINOR ...

/* Event / watcher-type bits */
#define EV_UNDEF ...
#define EV_NONE ...
#define EV_READ ...
#define EV_WRITE ...
#define EV__IOFDSET ...
#define EV_TIMER ...
#define EV_PERIODIC ...
#define EV_SIGNAL ...
#define EV_CHILD ...
#define EV_STAT ...
#define EV_IDLE ...
#define EV_PREPARE ...
#define EV_CHECK ...
#define EV_EMBED ...
#define EV_FORK ...
#define EV_CLEANUP ...
#define EV_ASYNC ...
#define EV_CUSTOM ...
#define EV_ERROR ...

/* Loop creation flags */
#define EVFLAG_AUTO ...
#define EVFLAG_NOENV ...
#define EVFLAG_FORKCHECK ...
#define EVFLAG_NOINOTIFY ...
#define EVFLAG_SIGNALFD ...
#define EVFLAG_NOSIGMASK ...

/* Backends */
#define EVBACKEND_SELECT ...
#define EVBACKEND_POLL ...
#define EVBACKEND_EPOLL ...
#define EVBACKEND_KQUEUE ...
#define EVBACKEND_DEVPOLL ...
#define EVBACKEND_PORT ...
/* #define EVBACKEND_IOCP ... */

#define EVBACKEND_ALL ...
#define EVBACKEND_MASK ...

#define EVRUN_NOWAIT ...
#define EVRUN_ONCE ...

#define EVBREAK_CANCEL ...
#define EVBREAK_ONE ...
#define EVBREAK_ALL ...

/* markers for the CFFI parser. Replaced when the string is read. */
#define GEVENT_STRUCT_DONE int
#define GEVENT_ST_NLINK_T int

struct ev_loop {
    int backend_fd;
    int activecnt;
    GEVENT_STRUCT_DONE _;
};

// Watcher types
// base for all watchers
struct ev_watcher{
    GEVENT_STRUCT_DONE _;
};

struct ev_io {
    int fd;
    int events;
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_timer {
    double at;
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_signal {
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_idle {
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_prepare {
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_check {
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_fork {
    void* data;
    GEVENT_STRUCT_DONE _;
};
struct ev_async {
    void* data;
    GEVENT_STRUCT_DONE _;
};

struct ev_child {
    int pid;
    int rpid;
    int rstatus;
    void* data;
    GEVENT_STRUCT_DONE _;
};

struct stat {
    GEVENT_ST_NLINK_T st_nlink;
    GEVENT_STRUCT_DONE _;
};

struct ev_stat {
    struct stat attr;
    const char* path;
    struct stat prev;
    double interval;
    void* data;
    GEVENT_STRUCT_DONE _;
};

typedef double ev_tstamp;

int ev_version_major();
int ev_version_minor();

unsigned int ev_supported_backends (void);
unsigned int ev_recommended_backends (void);
unsigned int ev_embeddable_backends (void);

ev_tstamp ev_time (void);
void ev_set_syserr_cb(void *);

int ev_priority(void*);
void ev_set_priority(void*, int);

int ev_is_pending(void*);
int ev_is_active(void*);
void ev_io_init(struct ev_io*, void* callback, int fd, int events);
void ev_io_start(struct ev_loop*, struct ev_io*);
void ev_io_stop(struct ev_loop*, struct ev_io*);
void ev_feed_event(struct ev_loop*, void*, int);

void ev_timer_init(struct ev_timer*, void *callback, double, double);
void ev_timer_start(struct ev_loop*, struct ev_timer*);
void ev_timer_stop(struct ev_loop*, struct ev_timer*);
void ev_timer_again(struct ev_loop*, struct ev_timer*);

void ev_signal_init(struct ev_signal*, void* callback, int);
void ev_signal_start(struct ev_loop*, struct ev_signal*);
void ev_signal_stop(struct ev_loop*, struct ev_signal*);

void ev_idle_init(struct ev_idle*, void* callback);
void ev_idle_start(struct ev_loop*, struct ev_idle*);
void ev_idle_stop(struct ev_loop*, struct ev_idle*);

void ev_prepare_init(struct ev_prepare*, void* callback);
void ev_prepare_start(struct ev_loop*, struct ev_prepare*);
void ev_prepare_stop(struct ev_loop*, struct ev_prepare*);

void ev_check_init(struct ev_check*, void* callback);
void ev_check_start(struct ev_loop*, struct ev_check*);
void ev_check_stop(struct ev_loop*, struct ev_check*);

void ev_fork_init(struct ev_fork*, void* callback);
void ev_fork_start(struct ev_loop*, struct ev_fork*);
void ev_fork_stop(struct ev_loop*, struct ev_fork*);

void ev_async_init(struct ev_async*, void* callback);
void ev_async_start(struct ev_loop*, struct ev_async*);
void ev_async_stop(struct ev_loop*, struct ev_async*);
void ev_async_send(struct ev_loop*, struct ev_async*);
int ev_async_pending(struct ev_async*);

void ev_child_init(struct ev_child*, void* callback, int, int);
void ev_child_start(struct ev_loop*, struct ev_child*);
void ev_child_stop(struct ev_loop*, struct ev_child*);

void ev_stat_init(struct ev_stat*, void* callback, char*, double);
void ev_stat_start(struct ev_loop*, struct ev_stat*);
void ev_stat_stop(struct ev_loop*, struct ev_stat*);

struct ev_loop *ev_default_loop (unsigned int flags);
struct ev_loop* ev_loop_new(unsigned int flags);
void ev_loop_destroy(struct ev_loop*);
void ev_loop_fork(struct ev_loop*);
int ev_is_default_loop (struct ev_loop *);
unsigned int ev_iteration(struct ev_loop*);
unsigned int ev_depth(struct ev_loop*);
unsigned int ev_backend(struct ev_loop*);
void ev_verify(struct ev_loop*);
void ev_run(struct ev_loop*, int flags);

ev_tstamp ev_now (struct ev_loop *);
void ev_now_update (struct ev_loop *); /* update event loop time */
void ev_ref(struct ev_loop*);
void ev_unref(struct ev_loop*);
void ev_break(struct ev_loop*, int);
unsigned int ev_pending_count(struct ev_loop*);

/* gevent-supplied helpers (defined in the _source file) */
struct ev_loop* gevent_ev_default_loop(unsigned int flags);
void gevent_install_sigchld_handler();
void gevent_reset_sigchld_handler();

void (*gevent_noop)(struct ev_loop *_loop, struct ev_timer *w, int revents);
void ev_sleep (ev_tstamp delay); /* sleep for a while */

/* gevent callbacks */
static int (*python_callback)(void* handle, int revents);
static void (*python_handle_error)(void* handle, int revents);
static void (*python_stop)(void* handle);

/*
 * We use a single C callback for every watcher type, which in turn calls the
 * Python callbacks. The ev_watcher pointer type can be used for every watcher type
 * because they all start with the same members---libev itself relies on this. Each
 * watcher types has a 'void* data' that stores the CFFI handle to the Python watcher
 * object.
 */
static void _gevent_generic_callback(struct ev_loop* loop, struct ev_watcher* watcher, int revents);
|
||||||
45
python/gevent/libev/_corecffi_source.c
Normal file
45
python/gevent/libev/_corecffi_source.c
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
// passed to the real C compiler
// (this is the CFFI `set_source` text; it is compiled together with the
// embedded libev to produce the gevent.libev._corecffi extension)
#define LIBEV_EMBED 1

#ifdef _WIN32
#define EV_STANDALONE 1
#include "libev_vfd.h"
#endif


#include "libev.h"

// A do-nothing timer callback, exported through the `gevent_noop` pointer
// so Python code can install it where libev requires *some* callback.
static void
_gevent_noop(struct ev_loop *_loop, struct ev_timer *w, int revents) { }

void (*gevent_noop)(struct ev_loop *, struct ev_timer *, int) = &_gevent_noop;
// Filled in from Python via ffi: the three entry points back into gevent.
static int (*python_callback)(void* handle, int revents);
static void (*python_handle_error)(void* handle, int revents);
static void (*python_stop)(void* handle);

// Single C-level callback shared by all watcher types; dispatches on the
// int result of the Python-side callback (-1 error, 0 maybe-stop, 1 done).
static void _gevent_generic_callback(struct ev_loop* loop,
                                     struct ev_watcher* watcher,
                                     int revents)
{
    void* handle = watcher->data;
    int cb_result = python_callback(handle, revents);
    switch(cb_result) {
        case -1:
            // in case of exception, call self.loop.handle_error;
            // this function is also responsible for stopping the watcher
            // and allowing memory to be freed
            python_handle_error(handle, revents);
            break;
        case 0:
            // Code to stop the event. Note that if python_callback
            // has disposed of the last reference to the handle,
            // `watcher` could now be invalid/disposed memory!
            if (!ev_is_active(watcher)) {
                python_stop(handle);
            }
            break;
        default:
            assert(cb_result == 1);
            // watcher is already stopped and dead, nothing to do.
    }
}
|
||||||
225
python/gevent/libev/callbacks.c
Normal file
225
python/gevent/libev/callbacks.c
Normal file
@@ -0,0 +1,225 @@
|
|||||||
|
/* Copyright (c) 2011-2012 Denis Bilenko. See LICENSE for details. */
|
||||||
|
#ifdef Py_PYTHON_H
|
||||||
|
|
||||||
|
/* the name changes depending on our file layout and --module-name option */
|
||||||
|
#define _GEVENTLOOP struct __pyx_vtabstruct_6gevent_5libev_8corecext_loop
|
||||||
|
|
||||||
|
|
||||||
|
/* Route the pending Python exception (if any) into loop.handle_error().
 * Steals nothing: the fetched exception triple is INCREF'd, the thread
 * state is cleared, and everything is DECREF'd before returning. */
static void gevent_handle_error(struct PyGeventLoopObject* loop, PyObject* context) {
    PyThreadState *tstate;
    PyObject *type, *value, *traceback, *result;
    tstate = PyThreadState_GET();
    type = tstate->curexc_type;
    if (!type)
        return;  /* no exception pending: nothing to do */
    value = tstate->curexc_value;
    traceback = tstate->curexc_traceback;
    /* value/traceback may legitimately be NULL; normalize to Py_None */
    if (!value) value = Py_None;
    if (!traceback) traceback = Py_None;

    /* own references across PyErr_Clear() and the handle_error call */
    Py_INCREF(type);
    Py_INCREF(value);
    Py_INCREF(traceback);

    PyErr_Clear();

    result = ((_GEVENTLOOP *)loop->__pyx_vtab)->handle_error(loop, context, type, value, traceback, 0);

    if (result) {
        Py_DECREF(result);
    }
    else {
        /* handle_error itself raised: last resort, print and clear */
        PyErr_Print();
        PyErr_Clear();
    }

    Py_DECREF(type);
    Py_DECREF(value);
    Py_DECREF(traceback);
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Give pending signals (e.g. KeyboardInterrupt) a chance to run, reporting
 * any resulting exception through gevent_handle_error(). */
static CYTHON_INLINE void gevent_check_signals(struct PyGeventLoopObject* loop) {
    if (!ev_is_default_loop(loop->_ptr)) {
        /* only reporting signals on the default loop */
        return;
    }
    PyErr_CheckSignals();
    if (PyErr_Occurred()) gevent_handle_error(loop, Py_None);
}
|
||||||
|
|
||||||
|
/* Recover the owning Python object from an embedded libev watcher pointer
 * (containerof-style pointer arithmetic via offsetof). */
#define GET_OBJECT(PY_TYPE, EV_PTR, MEMBER) \
    ((struct PY_TYPE *)(((char *)EV_PTR) - offsetof(struct PY_TYPE, MEMBER)))


/* GIL helpers: no-ops when Python is built without thread support. */
#ifdef WITH_THREAD
#define GIL_DECLARE PyGILState_STATE ___save
#define GIL_ENSURE ___save = PyGILState_Ensure();
#define GIL_RELEASE PyGILState_Release(___save);
#else
#define GIL_DECLARE
#define GIL_ENSURE
#define GIL_RELEASE
#endif
|
||||||
|
|
||||||
|
|
||||||
|
/* Call watcher.stop() from C; on any failure (missing attribute or the
 * call itself raising) route the exception through gevent_handle_error(). */
static void gevent_stop(PyObject* watcher, struct PyGeventLoopObject* loop) {
    PyObject *result, *method;
    int error;
    error = 1;
    method = PyObject_GetAttrString(watcher, "stop");
    if (method) {
        result = PyObject_Call(method, __pyx_empty_tuple, NULL);
        if (result) {
            Py_DECREF(result);
            error = 0;  /* stop() succeeded */
        }
        Py_DECREF(method);
    }
    if (error) {
        gevent_handle_error(loop, watcher);
    }
}
|
||||||
|
|
||||||
|
|
||||||
|
static void gevent_callback(struct PyGeventLoopObject* loop, PyObject* callback, PyObject* args, PyObject* watcher, void *c_watcher, int revents) {
|
||||||
|
GIL_DECLARE;
|
||||||
|
PyObject *result, *py_events;
|
||||||
|
long length;
|
||||||
|
py_events = 0;
|
||||||
|
GIL_ENSURE;
|
||||||
|
Py_INCREF(loop);
|
||||||
|
Py_INCREF(callback);
|
||||||
|
Py_INCREF(args);
|
||||||
|
Py_INCREF(watcher);
|
||||||
|
gevent_check_signals(loop);
|
||||||
|
if (args == Py_None) {
|
||||||
|
args = __pyx_empty_tuple;
|
||||||
|
}
|
||||||
|
length = PyTuple_Size(args);
|
||||||
|
if (length < 0) {
|
||||||
|
gevent_handle_error(loop, watcher);
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
|
if (length > 0 && PyTuple_GET_ITEM(args, 0) == GEVENT_CORE_EVENTS) {
|
||||||
|
py_events = PyInt_FromLong(revents);
|
||||||
|
if (!py_events) {
|
||||||
|
gevent_handle_error(loop, watcher);
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
|
PyTuple_SET_ITEM(args, 0, py_events);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
py_events = NULL;
|
||||||
|
}
|
||||||
|
result = PyObject_Call(callback, args, NULL);
|
||||||
|
if (result) {
|
||||||
|
Py_DECREF(result);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
gevent_handle_error(loop, watcher);
|
||||||
|
if (revents & (EV_READ|EV_WRITE)) {
|
||||||
|
/* io watcher: not stopping it may cause the failing callback to be called repeatedly */
|
||||||
|
gevent_stop(watcher, loop);
|
||||||
|
goto end;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!ev_is_active(c_watcher)) {
|
||||||
|
/* Watcher was stopped, maybe by libev. Let's call stop() to clean up
|
||||||
|
* 'callback' and 'args' properties, do Py_DECREF() and ev_ref() if necessary.
|
||||||
|
* BTW, we don't need to check for EV_ERROR, because libev stops the watcher in that case. */
|
||||||
|
gevent_stop(watcher, loop);
|
||||||
|
}
|
||||||
|
end:
|
||||||
|
if (py_events) {
|
||||||
|
Py_DECREF(py_events);
|
||||||
|
PyTuple_SET_ITEM(args, 0, GEVENT_CORE_EVENTS);
|
||||||
|
}
|
||||||
|
Py_DECREF(watcher);
|
||||||
|
Py_DECREF(args);
|
||||||
|
Py_DECREF(callback);
|
||||||
|
Py_DECREF(loop);
|
||||||
|
GIL_RELEASE;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void gevent_call(struct PyGeventLoopObject* loop, struct PyGeventCallbackObject* cb) {
|
||||||
|
/* no need for GIL here because it is only called from run_callbacks which already has GIL */
|
||||||
|
PyObject *result, *callback, *args;
|
||||||
|
if (!loop || !cb)
|
||||||
|
return;
|
||||||
|
callback = cb->callback;
|
||||||
|
args = cb->args;
|
||||||
|
if (!callback || !args)
|
||||||
|
return;
|
||||||
|
if (callback == Py_None || args == Py_None)
|
||||||
|
return;
|
||||||
|
Py_INCREF(loop);
|
||||||
|
Py_INCREF(callback);
|
||||||
|
Py_INCREF(args);
|
||||||
|
|
||||||
|
Py_INCREF(Py_None);
|
||||||
|
Py_DECREF(cb->callback);
|
||||||
|
cb->callback = Py_None;
|
||||||
|
|
||||||
|
result = PyObject_Call(callback, args, NULL);
|
||||||
|
if (result) {
|
||||||
|
Py_DECREF(result);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
gevent_handle_error(loop, (PyObject*)cb);
|
||||||
|
}
|
||||||
|
|
||||||
|
Py_INCREF(Py_None);
|
||||||
|
Py_DECREF(cb->args);
|
||||||
|
cb->args = Py_None;
|
||||||
|
|
||||||
|
Py_DECREF(callback);
|
||||||
|
Py_DECREF(args);
|
||||||
|
Py_DECREF(loop);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#undef DEFINE_CALLBACK
|
||||||
|
#define DEFINE_CALLBACK(WATCHER_LC, WATCHER_TYPE) \
|
||||||
|
static void gevent_callback_##WATCHER_LC(struct ev_loop *_loop, void *c_watcher, int revents) { \
|
||||||
|
struct PyGevent##WATCHER_TYPE##Object* watcher = GET_OBJECT(PyGevent##WATCHER_TYPE##Object, c_watcher, _watcher); \
|
||||||
|
gevent_callback(watcher->loop, watcher->_callback, watcher->args, (PyObject*)watcher, c_watcher, revents); \
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
DEFINE_CALLBACKS
|
||||||
|
|
||||||
|
|
||||||
|
static void gevent_run_callbacks(struct ev_loop *_loop, void *watcher, int revents) {
|
||||||
|
struct PyGeventLoopObject* loop;
|
||||||
|
PyObject *result;
|
||||||
|
GIL_DECLARE;
|
||||||
|
GIL_ENSURE;
|
||||||
|
loop = GET_OBJECT(PyGeventLoopObject, watcher, _prepare);
|
||||||
|
Py_INCREF(loop);
|
||||||
|
gevent_check_signals(loop);
|
||||||
|
result = ((_GEVENTLOOP *)loop->__pyx_vtab)->_run_callbacks(loop);
|
||||||
|
if (result) {
|
||||||
|
Py_DECREF(result);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
PyErr_Print();
|
||||||
|
PyErr_Clear();
|
||||||
|
}
|
||||||
|
Py_DECREF(loop);
|
||||||
|
GIL_RELEASE;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
|
||||||
|
static void gevent_periodic_signal_check(struct ev_loop *_loop, void *watcher, int revents) {
|
||||||
|
GIL_DECLARE;
|
||||||
|
GIL_ENSURE;
|
||||||
|
gevent_check_signals(GET_OBJECT(PyGeventLoopObject, watcher, _periodic_signal_checker));
|
||||||
|
GIL_RELEASE;
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* _WIN32 */
|
||||||
|
|
||||||
|
#endif /* Py_PYTHON_H */
|
||||||
43
python/gevent/libev/callbacks.h
Normal file
43
python/gevent/libev/callbacks.h
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
#define DEFINE_CALLBACK(WATCHER_LC, WATCHER_TYPE) \
|
||||||
|
static void gevent_callback_##WATCHER_LC(struct ev_loop *, void *, int);
|
||||||
|
|
||||||
|
|
||||||
|
#define DEFINE_CALLBACKS0 \
|
||||||
|
DEFINE_CALLBACK(io, IO); \
|
||||||
|
DEFINE_CALLBACK(timer, Timer); \
|
||||||
|
DEFINE_CALLBACK(signal, Signal); \
|
||||||
|
DEFINE_CALLBACK(idle, Idle); \
|
||||||
|
DEFINE_CALLBACK(prepare, Prepare); \
|
||||||
|
DEFINE_CALLBACK(check, Check); \
|
||||||
|
DEFINE_CALLBACK(fork, Fork); \
|
||||||
|
DEFINE_CALLBACK(async, Async); \
|
||||||
|
DEFINE_CALLBACK(stat, Stat);
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef _WIN32
|
||||||
|
|
||||||
|
#define DEFINE_CALLBACKS \
|
||||||
|
DEFINE_CALLBACKS0 \
|
||||||
|
DEFINE_CALLBACK(child, Child)
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define DEFINE_CALLBACKS DEFINE_CALLBACKS0
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
DEFINE_CALLBACKS
|
||||||
|
|
||||||
|
|
||||||
|
static void gevent_run_callbacks(struct ev_loop *, void *, int);
|
||||||
|
struct PyGeventLoopObject;
|
||||||
|
static void gevent_handle_error(struct PyGeventLoopObject* loop, PyObject* context);
|
||||||
|
struct PyGeventCallbackObject;
|
||||||
|
static void gevent_call(struct PyGeventLoopObject* loop, struct PyGeventCallbackObject* cb);
|
||||||
|
|
||||||
|
#if defined(_WIN32)
|
||||||
|
static void gevent_periodic_signal_check(struct ev_loop *, void *, int);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static void gevent_noop(struct ev_loop *_loop, void *watcher, int revents) { }
|
||||||
1134
python/gevent/libev/corecext.ppyx
Normal file
1134
python/gevent/libev/corecext.ppyx
Normal file
File diff suppressed because it is too large
Load Diff
2110
python/gevent/libev/corecext.pyx
Normal file
2110
python/gevent/libev/corecext.pyx
Normal file
File diff suppressed because it is too large
Load Diff
1132
python/gevent/libev/corecffi.py
Normal file
1132
python/gevent/libev/corecffi.py
Normal file
File diff suppressed because it is too large
Load Diff
33465
python/gevent/libev/gevent.corecext.c
Normal file
33465
python/gevent/libev/gevent.corecext.c
Normal file
File diff suppressed because it is too large
Load Diff
66
python/gevent/libev/libev.h
Normal file
66
python/gevent/libev/libev.h
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
#if defined(LIBEV_EMBED)
|
||||||
|
#include "ev.c"
|
||||||
|
#else
|
||||||
|
#include "ev.h"
|
||||||
|
|
||||||
|
#ifndef _WIN32
|
||||||
|
#include <signal.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef _WIN32
|
||||||
|
|
||||||
|
static struct sigaction libev_sigchld;
|
||||||
|
/*
|
||||||
|
* Track the state of whether we have installed
|
||||||
|
* the libev sigchld handler specifically.
|
||||||
|
* If it's non-zero, libev_sigchld will be valid and set to the action
|
||||||
|
* that libev needs to do.
|
||||||
|
* If it's 1, we need to install libev_sigchld to make libev
|
||||||
|
* child handlers work (on request).
|
||||||
|
*/
|
||||||
|
static int sigchld_state = 0;
|
||||||
|
|
||||||
|
static struct ev_loop* gevent_ev_default_loop(unsigned int flags)
|
||||||
|
{
|
||||||
|
struct ev_loop* result;
|
||||||
|
struct sigaction tmp;
|
||||||
|
|
||||||
|
if (sigchld_state)
|
||||||
|
return ev_default_loop(flags);
|
||||||
|
|
||||||
|
// Request the old SIGCHLD handler
|
||||||
|
sigaction(SIGCHLD, NULL, &tmp);
|
||||||
|
// Get the loop, which will install a SIGCHLD handler
|
||||||
|
result = ev_default_loop(flags);
|
||||||
|
// XXX what if SIGCHLD received there?
|
||||||
|
// Now restore the previous SIGCHLD handler
|
||||||
|
sigaction(SIGCHLD, &tmp, &libev_sigchld);
|
||||||
|
sigchld_state = 1;
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void gevent_install_sigchld_handler(void) {
|
||||||
|
if (sigchld_state == 1) {
|
||||||
|
sigaction(SIGCHLD, &libev_sigchld, NULL);
|
||||||
|
sigchld_state = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void gevent_reset_sigchld_handler(void) {
|
||||||
|
// We could have any state at this point, depending on
|
||||||
|
// whether the default loop has been used. If it has,
|
||||||
|
// then always be in state 1 ("need to install)
|
||||||
|
if (sigchld_state) {
|
||||||
|
sigchld_state = 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#define gevent_ev_default_loop ev_default_loop
|
||||||
|
static void gevent_install_sigchld_handler(void) { }
|
||||||
|
|
||||||
|
#endif
|
||||||
208
python/gevent/libev/libev.pxd
Normal file
208
python/gevent/libev/libev.pxd
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
cdef extern from "libev_vfd.h":
|
||||||
|
#ifdef _WIN32
|
||||||
|
#ifdef _WIN64
|
||||||
|
ctypedef long long vfd_socket_t
|
||||||
|
#else
|
||||||
|
ctypedef long vfd_socket_t
|
||||||
|
#endif
|
||||||
|
#else
|
||||||
|
ctypedef int vfd_socket_t
|
||||||
|
#endif
|
||||||
|
long vfd_get(int)
|
||||||
|
int vfd_open(long) except -1
|
||||||
|
void vfd_free(int)
|
||||||
|
|
||||||
|
cdef extern from "libev.h":
|
||||||
|
int EV_MINPRI
|
||||||
|
int EV_MAXPRI
|
||||||
|
|
||||||
|
int EV_VERSION_MAJOR
|
||||||
|
int EV_VERSION_MINOR
|
||||||
|
|
||||||
|
int EV_USE_FLOOR
|
||||||
|
int EV_USE_CLOCK_SYSCALL
|
||||||
|
int EV_USE_REALTIME
|
||||||
|
int EV_USE_MONOTONIC
|
||||||
|
int EV_USE_NANOSLEEP
|
||||||
|
int EV_USE_SELECT
|
||||||
|
int EV_USE_POLL
|
||||||
|
int EV_USE_EPOLL
|
||||||
|
int EV_USE_KQUEUE
|
||||||
|
int EV_USE_PORT
|
||||||
|
int EV_USE_INOTIFY
|
||||||
|
int EV_USE_SIGNALFD
|
||||||
|
int EV_USE_EVENTFD
|
||||||
|
int EV_USE_4HEAP
|
||||||
|
int EV_USE_IOCP
|
||||||
|
int EV_SELECT_IS_WINSOCKET
|
||||||
|
|
||||||
|
int EV_UNDEF
|
||||||
|
int EV_NONE
|
||||||
|
int EV_READ
|
||||||
|
int EV_WRITE
|
||||||
|
int EV__IOFDSET
|
||||||
|
int EV_TIMER
|
||||||
|
int EV_PERIODIC
|
||||||
|
int EV_SIGNAL
|
||||||
|
int EV_CHILD
|
||||||
|
int EV_STAT
|
||||||
|
int EV_IDLE
|
||||||
|
int EV_PREPARE
|
||||||
|
int EV_CHECK
|
||||||
|
int EV_EMBED
|
||||||
|
int EV_FORK
|
||||||
|
int EV_CLEANUP
|
||||||
|
int EV_ASYNC
|
||||||
|
int EV_CUSTOM
|
||||||
|
int EV_ERROR
|
||||||
|
|
||||||
|
int EVFLAG_AUTO
|
||||||
|
int EVFLAG_NOENV
|
||||||
|
int EVFLAG_FORKCHECK
|
||||||
|
int EVFLAG_NOINOTIFY
|
||||||
|
int EVFLAG_SIGNALFD
|
||||||
|
int EVFLAG_NOSIGMASK
|
||||||
|
|
||||||
|
int EVBACKEND_SELECT
|
||||||
|
int EVBACKEND_POLL
|
||||||
|
int EVBACKEND_EPOLL
|
||||||
|
int EVBACKEND_KQUEUE
|
||||||
|
int EVBACKEND_DEVPOLL
|
||||||
|
int EVBACKEND_PORT
|
||||||
|
int EVBACKEND_IOCP
|
||||||
|
int EVBACKEND_ALL
|
||||||
|
int EVBACKEND_MASK
|
||||||
|
|
||||||
|
int EVRUN_NOWAIT
|
||||||
|
int EVRUN_ONCE
|
||||||
|
|
||||||
|
int EVBREAK_CANCEL
|
||||||
|
int EVBREAK_ONE
|
||||||
|
int EVBREAK_ALL
|
||||||
|
|
||||||
|
struct ev_loop:
|
||||||
|
int activecnt
|
||||||
|
int sig_pending
|
||||||
|
int backend_fd
|
||||||
|
int sigfd
|
||||||
|
unsigned int origflags
|
||||||
|
|
||||||
|
struct ev_io:
|
||||||
|
int fd
|
||||||
|
int events
|
||||||
|
|
||||||
|
struct ev_timer:
|
||||||
|
double at
|
||||||
|
|
||||||
|
struct ev_signal:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_idle:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_prepare:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_check:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_fork:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_async:
|
||||||
|
pass
|
||||||
|
|
||||||
|
struct ev_child:
|
||||||
|
int pid
|
||||||
|
int rpid
|
||||||
|
int rstatus
|
||||||
|
|
||||||
|
struct stat:
|
||||||
|
int st_nlink
|
||||||
|
|
||||||
|
struct ev_stat:
|
||||||
|
stat attr
|
||||||
|
stat prev
|
||||||
|
double interval
|
||||||
|
|
||||||
|
int ev_version_major()
|
||||||
|
int ev_version_minor()
|
||||||
|
|
||||||
|
unsigned int ev_supported_backends()
|
||||||
|
unsigned int ev_recommended_backends()
|
||||||
|
unsigned int ev_embeddable_backends()
|
||||||
|
|
||||||
|
double ev_time()
|
||||||
|
void ev_set_syserr_cb(void *)
|
||||||
|
|
||||||
|
int ev_priority(void*)
|
||||||
|
void ev_set_priority(void*, int)
|
||||||
|
|
||||||
|
int ev_is_pending(void*)
|
||||||
|
int ev_is_active(void*)
|
||||||
|
void ev_io_init(ev_io*, void* callback, int fd, int events)
|
||||||
|
void ev_io_start(ev_loop*, ev_io*)
|
||||||
|
void ev_io_stop(ev_loop*, ev_io*)
|
||||||
|
void ev_feed_event(ev_loop*, void*, int)
|
||||||
|
|
||||||
|
void ev_timer_init(ev_timer*, void* callback, double, double)
|
||||||
|
void ev_timer_start(ev_loop*, ev_timer*)
|
||||||
|
void ev_timer_stop(ev_loop*, ev_timer*)
|
||||||
|
void ev_timer_again(ev_loop*, ev_timer*)
|
||||||
|
|
||||||
|
void ev_signal_init(ev_signal*, void* callback, int)
|
||||||
|
void ev_signal_start(ev_loop*, ev_signal*)
|
||||||
|
void ev_signal_stop(ev_loop*, ev_signal*)
|
||||||
|
|
||||||
|
void ev_idle_init(ev_idle*, void* callback)
|
||||||
|
void ev_idle_start(ev_loop*, ev_idle*)
|
||||||
|
void ev_idle_stop(ev_loop*, ev_idle*)
|
||||||
|
|
||||||
|
void ev_prepare_init(ev_prepare*, void* callback)
|
||||||
|
void ev_prepare_start(ev_loop*, ev_prepare*)
|
||||||
|
void ev_prepare_stop(ev_loop*, ev_prepare*)
|
||||||
|
|
||||||
|
void ev_check_init(ev_check*, void* callback)
|
||||||
|
void ev_check_start(ev_loop*, ev_check*)
|
||||||
|
void ev_check_stop(ev_loop*, ev_check*)
|
||||||
|
|
||||||
|
void ev_fork_init(ev_fork*, void* callback)
|
||||||
|
void ev_fork_start(ev_loop*, ev_fork*)
|
||||||
|
void ev_fork_stop(ev_loop*, ev_fork*)
|
||||||
|
|
||||||
|
void ev_async_init(ev_async*, void* callback)
|
||||||
|
void ev_async_start(ev_loop*, ev_async*)
|
||||||
|
void ev_async_stop(ev_loop*, ev_async*)
|
||||||
|
void ev_async_send(ev_loop*, ev_async*)
|
||||||
|
int ev_async_pending(ev_async*)
|
||||||
|
|
||||||
|
void ev_child_init(ev_child*, void* callback, int, int)
|
||||||
|
void ev_child_start(ev_loop*, ev_child*)
|
||||||
|
void ev_child_stop(ev_loop*, ev_child*)
|
||||||
|
|
||||||
|
void ev_stat_init(ev_stat*, void* callback, char*, double)
|
||||||
|
void ev_stat_start(ev_loop*, ev_stat*)
|
||||||
|
void ev_stat_stop(ev_loop*, ev_stat*)
|
||||||
|
|
||||||
|
ev_loop* ev_default_loop(unsigned int flags)
|
||||||
|
ev_loop* ev_loop_new(unsigned int flags)
|
||||||
|
void ev_loop_destroy(ev_loop*)
|
||||||
|
void ev_loop_fork(ev_loop*)
|
||||||
|
int ev_is_default_loop(ev_loop*)
|
||||||
|
unsigned int ev_iteration(ev_loop*)
|
||||||
|
unsigned int ev_depth(ev_loop*)
|
||||||
|
unsigned int ev_backend(ev_loop*)
|
||||||
|
void ev_verify(ev_loop*)
|
||||||
|
void ev_run(ev_loop*, int flags) nogil
|
||||||
|
|
||||||
|
double ev_now(ev_loop*)
|
||||||
|
void ev_now_update(ev_loop*)
|
||||||
|
|
||||||
|
void ev_ref(ev_loop*)
|
||||||
|
void ev_unref(ev_loop*)
|
||||||
|
void ev_break(ev_loop*, int)
|
||||||
|
unsigned int ev_pending_count(ev_loop*)
|
||||||
|
|
||||||
|
ev_loop* gevent_ev_default_loop(unsigned int flags)
|
||||||
|
void gevent_install_sigchld_handler()
|
||||||
|
void gevent_reset_sigchld_handler()
|
||||||
223
python/gevent/libev/libev_vfd.h
Normal file
223
python/gevent/libev/libev_vfd.h
Normal file
@@ -0,0 +1,223 @@
|
|||||||
|
#ifdef _WIN32
|
||||||
|
#ifdef _WIN64
|
||||||
|
typedef PY_LONG_LONG vfd_socket_t;
|
||||||
|
#define vfd_socket_object PyLong_FromLongLong
|
||||||
|
#else
|
||||||
|
typedef long vfd_socket_t;
|
||||||
|
#define vfd_socket_object PyInt_FromLong
|
||||||
|
#endif
|
||||||
|
#ifdef LIBEV_EMBED
|
||||||
|
/*
|
||||||
|
* If libev on win32 is embedded, then we can use an
|
||||||
|
* arbitrary mapping between integer fds and OS
|
||||||
|
* handles. Then by defining special macros libev
|
||||||
|
* will use our functions.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#define WIN32_LEAN_AND_MEAN
|
||||||
|
#include <winsock2.h>
|
||||||
|
#include <windows.h>
|
||||||
|
|
||||||
|
typedef struct vfd_entry_t
|
||||||
|
{
|
||||||
|
vfd_socket_t handle; /* OS handle, i.e. SOCKET */
|
||||||
|
int count; /* Reference count, 0 if free */
|
||||||
|
int next; /* Next free fd, -1 if last */
|
||||||
|
} vfd_entry;
|
||||||
|
|
||||||
|
#define VFD_INCREMENT 128
|
||||||
|
static int vfd_num = 0; /* num allocated fds */
|
||||||
|
static int vfd_max = 0; /* max allocated fds */
|
||||||
|
static int vfd_next = -1; /* next free fd for reuse */
|
||||||
|
static PyObject* vfd_map = NULL; /* map OS handle -> virtual fd */
|
||||||
|
static vfd_entry* vfd_entries = NULL; /* list of virtual fd entries */
|
||||||
|
|
||||||
|
#ifdef WITH_THREAD
|
||||||
|
static CRITICAL_SECTION* volatile vfd_lock = NULL;
|
||||||
|
static CRITICAL_SECTION* vfd_make_lock()
|
||||||
|
{
|
||||||
|
if (vfd_lock == NULL) {
|
||||||
|
/* must use malloc and not PyMem_Malloc here */
|
||||||
|
CRITICAL_SECTION* lock = malloc(sizeof(CRITICAL_SECTION));
|
||||||
|
InitializeCriticalSection(lock);
|
||||||
|
if (InterlockedCompareExchangePointer(&vfd_lock, lock, NULL) != NULL) {
|
||||||
|
/* another thread initialized lock first */
|
||||||
|
DeleteCriticalSection(lock);
|
||||||
|
free(lock);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vfd_lock;
|
||||||
|
}
|
||||||
|
#define VFD_LOCK_ENTER EnterCriticalSection(vfd_make_lock())
|
||||||
|
#define VFD_LOCK_LEAVE LeaveCriticalSection(vfd_lock)
|
||||||
|
#define VFD_GIL_DECLARE PyGILState_STATE ___save
|
||||||
|
#define VFD_GIL_ENSURE ___save = PyGILState_Ensure()
|
||||||
|
#define VFD_GIL_RELEASE PyGILState_Release(___save)
|
||||||
|
#else
|
||||||
|
#define VFD_LOCK_ENTER
|
||||||
|
#define VFD_LOCK_LEAVE
|
||||||
|
#define VFD_GIL_DECLARE
|
||||||
|
#define VFD_GIL_ENSURE
|
||||||
|
#define VFD_GIL_RELEASE
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given a virtual fd returns an OS handle or -1
|
||||||
|
* This function is speed critical, so it cannot use GIL
|
||||||
|
*/
|
||||||
|
static vfd_socket_t vfd_get(int fd)
|
||||||
|
{
|
||||||
|
int handle = -1;
|
||||||
|
VFD_LOCK_ENTER;
|
||||||
|
if (vfd_entries != NULL && fd >= 0 && fd < vfd_num)
|
||||||
|
handle = vfd_entries[fd].handle;
|
||||||
|
VFD_LOCK_LEAVE;
|
||||||
|
return handle;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define EV_FD_TO_WIN32_HANDLE(fd) vfd_get((fd))
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Given an OS handle finds or allocates a virtual fd
|
||||||
|
* Returns -1 on failure and sets Python exception if pyexc is non-zero
|
||||||
|
*/
|
||||||
|
static int vfd_open_(vfd_socket_t handle, int pyexc)
|
||||||
|
{
|
||||||
|
VFD_GIL_DECLARE;
|
||||||
|
int fd = -1;
|
||||||
|
unsigned long arg;
|
||||||
|
PyObject* key = NULL;
|
||||||
|
PyObject* value;
|
||||||
|
|
||||||
|
if (!pyexc) {
|
||||||
|
VFD_GIL_ENSURE;
|
||||||
|
}
|
||||||
|
if (ioctlsocket(handle, FIONREAD, &arg) != 0) {
|
||||||
|
if (pyexc)
|
||||||
|
PyErr_Format(PyExc_IOError,
|
||||||
|
#ifdef _WIN64
|
||||||
|
"%lld is not a socket (files are not supported)",
|
||||||
|
#else
|
||||||
|
"%ld is not a socket (files are not supported)",
|
||||||
|
#endif
|
||||||
|
handle);
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
if (vfd_map == NULL) {
|
||||||
|
vfd_map = PyDict_New();
|
||||||
|
if (vfd_map == NULL)
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
key = vfd_socket_object(handle);
|
||||||
|
/* check if it's already in the dict */
|
||||||
|
value = PyDict_GetItem(vfd_map, key);
|
||||||
|
if (value != NULL) {
|
||||||
|
/* is it safe to use PyInt_AS_LONG(value) here? */
|
||||||
|
fd = PyInt_AsLong(value);
|
||||||
|
if (fd >= 0) {
|
||||||
|
++vfd_entries[fd].count;
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
/* use the free entry, if available */
|
||||||
|
if (vfd_next >= 0) {
|
||||||
|
fd = vfd_next;
|
||||||
|
vfd_next = vfd_entries[fd].next;
|
||||||
|
VFD_LOCK_ENTER;
|
||||||
|
goto allocated;
|
||||||
|
}
|
||||||
|
/* check if it would be out of bounds */
|
||||||
|
if (vfd_num >= FD_SETSIZE) {
|
||||||
|
/* libev's select doesn't support more that FD_SETSIZE fds */
|
||||||
|
if (pyexc)
|
||||||
|
PyErr_Format(PyExc_IOError, "cannot watch more than %d sockets", (int)FD_SETSIZE);
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
/* allocate more space if needed */
|
||||||
|
VFD_LOCK_ENTER;
|
||||||
|
if (vfd_num >= vfd_max) {
|
||||||
|
int newsize = vfd_max + VFD_INCREMENT;
|
||||||
|
vfd_entry* entries = PyMem_Realloc(vfd_entries, sizeof(vfd_entry) * newsize);
|
||||||
|
if (entries == NULL) {
|
||||||
|
VFD_LOCK_LEAVE;
|
||||||
|
if (pyexc)
|
||||||
|
PyErr_NoMemory();
|
||||||
|
goto done;
|
||||||
|
}
|
||||||
|
vfd_entries = entries;
|
||||||
|
vfd_max = newsize;
|
||||||
|
}
|
||||||
|
fd = vfd_num++;
|
||||||
|
allocated:
|
||||||
|
/* vfd_lock must be acquired when entering here */
|
||||||
|
vfd_entries[fd].handle = handle;
|
||||||
|
vfd_entries[fd].count = 1;
|
||||||
|
VFD_LOCK_LEAVE;
|
||||||
|
value = PyInt_FromLong(fd);
|
||||||
|
PyDict_SetItem(vfd_map, key, value);
|
||||||
|
Py_DECREF(value);
|
||||||
|
done:
|
||||||
|
Py_XDECREF(key);
|
||||||
|
if (!pyexc) {
|
||||||
|
VFD_GIL_RELEASE;
|
||||||
|
}
|
||||||
|
return fd;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define vfd_open(fd) vfd_open_((fd), 1)
|
||||||
|
#define EV_WIN32_HANDLE_TO_FD(handle) vfd_open_((handle), 0)
|
||||||
|
|
||||||
|
static void vfd_free_(int fd, int needclose)
|
||||||
|
{
|
||||||
|
VFD_GIL_DECLARE;
|
||||||
|
PyObject* key;
|
||||||
|
|
||||||
|
if (needclose) {
|
||||||
|
VFD_GIL_ENSURE;
|
||||||
|
}
|
||||||
|
if (fd < 0 || fd >= vfd_num)
|
||||||
|
goto done; /* out of bounds */
|
||||||
|
if (vfd_entries[fd].count <= 0)
|
||||||
|
goto done; /* free entry, ignore */
|
||||||
|
if (!--vfd_entries[fd].count) {
|
||||||
|
/* fd has just been freed */
|
||||||
|
vfd_socket_t handle = vfd_entries[fd].handle;
|
||||||
|
vfd_entries[fd].handle = -1;
|
||||||
|
vfd_entries[fd].next = vfd_next;
|
||||||
|
vfd_next = fd;
|
||||||
|
if (needclose)
|
||||||
|
closesocket(handle);
|
||||||
|
/* vfd_map is assumed to be != NULL */
|
||||||
|
key = vfd_socket_object(handle);
|
||||||
|
PyDict_DelItem(vfd_map, key);
|
||||||
|
Py_DECREF(key);
|
||||||
|
}
|
||||||
|
done:
|
||||||
|
if (needclose) {
|
||||||
|
VFD_GIL_RELEASE;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#define vfd_free(fd) vfd_free_((fd), 0)
|
||||||
|
#define EV_WIN32_CLOSE_FD(fd) vfd_free_((fd), 1)
|
||||||
|
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* If libev on win32 is not embedded in gevent, then
|
||||||
|
* the only way to map vfds is to use the default of
|
||||||
|
* using runtime fds in libev. Note that it will leak
|
||||||
|
* fds, because there's no way of closing them safely
|
||||||
|
*/
|
||||||
|
#define vfd_get(fd) _get_osfhandle((fd))
|
||||||
|
#define vfd_open(fd) _open_osfhandle((fd), 0)
|
||||||
|
#define vfd_free(fd)
|
||||||
|
#endif
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* On non-win32 platforms vfd_* are noop macros
|
||||||
|
*/
|
||||||
|
typedef int vfd_socket_t;
|
||||||
|
#define vfd_get(fd) (fd)
|
||||||
|
#define vfd_open(fd) ((int)(fd))
|
||||||
|
#define vfd_free(fd)
|
||||||
|
#endif
|
||||||
187
python/gevent/libev/stathelper.c
Normal file
187
python/gevent/libev/stathelper.c
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
/* copied from Python-2.7.2/Modules/posixmodule.c */
|
||||||
|
#include "structseq.h"
|
||||||
|
|
||||||
|
#define STRUCT_STAT struct stat
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
|
||||||
|
#define ST_BLKSIZE_IDX 13
|
||||||
|
#else
|
||||||
|
#define ST_BLKSIZE_IDX 12
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
|
||||||
|
#define ST_BLOCKS_IDX (ST_BLKSIZE_IDX+1)
|
||||||
|
#else
|
||||||
|
#define ST_BLOCKS_IDX ST_BLKSIZE_IDX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_RDEV
|
||||||
|
#define ST_RDEV_IDX (ST_BLOCKS_IDX+1)
|
||||||
|
#else
|
||||||
|
#define ST_RDEV_IDX ST_BLOCKS_IDX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
|
||||||
|
#define ST_FLAGS_IDX (ST_RDEV_IDX+1)
|
||||||
|
#else
|
||||||
|
#define ST_FLAGS_IDX ST_RDEV_IDX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_GEN
|
||||||
|
#define ST_GEN_IDX (ST_FLAGS_IDX+1)
|
||||||
|
#else
|
||||||
|
#define ST_GEN_IDX ST_FLAGS_IDX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BIRTHTIME
|
||||||
|
#define ST_BIRTHTIME_IDX (ST_GEN_IDX+1)
|
||||||
|
#else
|
||||||
|
#define ST_BIRTHTIME_IDX ST_GEN_IDX
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
static PyObject* posixmodule = NULL;
|
||||||
|
static PyTypeObject* pStatResultType = NULL;
|
||||||
|
|
||||||
|
|
||||||
|
static PyObject* import_posixmodule(void)
|
||||||
|
{
|
||||||
|
if (!posixmodule) {
|
||||||
|
posixmodule = PyImport_ImportModule("posix");
|
||||||
|
}
|
||||||
|
return posixmodule;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static PyObject* import_StatResultType(void)
|
||||||
|
{
|
||||||
|
PyObject* p = NULL;
|
||||||
|
if (!pStatResultType) {
|
||||||
|
PyObject* module;
|
||||||
|
module = import_posixmodule();
|
||||||
|
if (module) {
|
||||||
|
p = PyObject_GetAttrString(module, "stat_result");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return p;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void
|
||||||
|
fill_time(PyObject *v, int index, time_t sec, unsigned long nsec)
|
||||||
|
{
|
||||||
|
PyObject *fval,*ival;
|
||||||
|
#if SIZEOF_TIME_T > SIZEOF_LONG
|
||||||
|
ival = PyLong_FromLongLong((PY_LONG_LONG)sec);
|
||||||
|
#else
|
||||||
|
ival = PyInt_FromLong((long)sec);
|
||||||
|
#endif
|
||||||
|
if (!ival)
|
||||||
|
return;
|
||||||
|
fval = PyFloat_FromDouble(sec + 1e-9*nsec);
|
||||||
|
PyStructSequence_SET_ITEM(v, index, ival);
|
||||||
|
PyStructSequence_SET_ITEM(v, index+3, fval);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* pack a system stat C structure into the Python stat tuple
|
||||||
|
(used by posix_stat() and posix_fstat()) */
|
||||||
|
static PyObject*
|
||||||
|
_pystat_fromstructstat(STRUCT_STAT *st)
|
||||||
|
{
|
||||||
|
unsigned long ansec, mnsec, cnsec;
|
||||||
|
PyObject *v;
|
||||||
|
|
||||||
|
PyTypeObject* StatResultType = (PyTypeObject*)import_StatResultType();
|
||||||
|
if (StatResultType == NULL) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
v = PyStructSequence_New(StatResultType);
|
||||||
|
if (v == NULL)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
PyStructSequence_SET_ITEM(v, 0, PyInt_FromLong((long)st->st_mode));
|
||||||
|
#ifdef HAVE_LARGEFILE_SUPPORT
|
||||||
|
PyStructSequence_SET_ITEM(v, 1,
|
||||||
|
PyLong_FromLongLong((PY_LONG_LONG)st->st_ino));
|
||||||
|
#else
|
||||||
|
PyStructSequence_SET_ITEM(v, 1, PyInt_FromLong((long)st->st_ino));
|
||||||
|
#endif
|
||||||
|
#if defined(HAVE_LONG_LONG) && !defined(MS_WINDOWS)
|
||||||
|
PyStructSequence_SET_ITEM(v, 2,
|
||||||
|
PyLong_FromLongLong((PY_LONG_LONG)st->st_dev));
|
||||||
|
#else
|
||||||
|
PyStructSequence_SET_ITEM(v, 2, PyInt_FromLong((long)st->st_dev));
|
||||||
|
#endif
|
||||||
|
PyStructSequence_SET_ITEM(v, 3, PyInt_FromLong((long)st->st_nlink));
|
||||||
|
PyStructSequence_SET_ITEM(v, 4, PyInt_FromLong((long)st->st_uid));
|
||||||
|
PyStructSequence_SET_ITEM(v, 5, PyInt_FromLong((long)st->st_gid));
|
||||||
|
#ifdef HAVE_LARGEFILE_SUPPORT
|
||||||
|
PyStructSequence_SET_ITEM(v, 6,
|
||||||
|
PyLong_FromLongLong((PY_LONG_LONG)st->st_size));
|
||||||
|
#else
|
||||||
|
PyStructSequence_SET_ITEM(v, 6, PyInt_FromLong(st->st_size));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(HAVE_STAT_TV_NSEC)
|
||||||
|
ansec = st->st_atim.tv_nsec;
|
||||||
|
mnsec = st->st_mtim.tv_nsec;
|
||||||
|
cnsec = st->st_ctim.tv_nsec;
|
||||||
|
#elif defined(HAVE_STAT_TV_NSEC2)
|
||||||
|
ansec = st->st_atimespec.tv_nsec;
|
||||||
|
mnsec = st->st_mtimespec.tv_nsec;
|
||||||
|
cnsec = st->st_ctimespec.tv_nsec;
|
||||||
|
#elif defined(HAVE_STAT_NSEC)
|
||||||
|
ansec = st->st_atime_nsec;
|
||||||
|
mnsec = st->st_mtime_nsec;
|
||||||
|
cnsec = st->st_ctime_nsec;
|
||||||
|
#else
|
||||||
|
ansec = mnsec = cnsec = 0;
|
||||||
|
#endif
|
||||||
|
fill_time(v, 7, st->st_atime, ansec);
|
||||||
|
fill_time(v, 8, st->st_mtime, mnsec);
|
||||||
|
fill_time(v, 9, st->st_ctime, cnsec);
|
||||||
|
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BLKSIZE
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_BLKSIZE_IDX,
|
||||||
|
PyInt_FromLong((long)st->st_blksize));
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BLOCKS
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_BLOCKS_IDX,
|
||||||
|
PyInt_FromLong((long)st->st_blocks));
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_RDEV
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_RDEV_IDX,
|
||||||
|
PyInt_FromLong((long)st->st_rdev));
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_GEN
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_GEN_IDX,
|
||||||
|
PyInt_FromLong((long)st->st_gen));
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_BIRTHTIME
|
||||||
|
{
|
||||||
|
PyObject *val;
|
||||||
|
unsigned long bsec,bnsec;
|
||||||
|
bsec = (long)st->st_birthtime;
|
||||||
|
#ifdef HAVE_STAT_TV_NSEC2
|
||||||
|
bnsec = st->st_birthtimespec.tv_nsec;
|
||||||
|
#else
|
||||||
|
bnsec = 0;
|
||||||
|
#endif
|
||||||
|
val = PyFloat_FromDouble(bsec + 1e-9*bnsec);
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_BIRTHTIME_IDX,
|
||||||
|
val);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
#ifdef HAVE_STRUCT_STAT_ST_FLAGS
|
||||||
|
PyStructSequence_SET_ITEM(v, ST_FLAGS_IDX,
|
||||||
|
PyInt_FromLong((long)st->st_flags));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
if (PyErr_Occurred()) {
|
||||||
|
Py_DECREF(v);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
return v;
|
||||||
|
}
|
||||||
293
python/gevent/local.py
Normal file
293
python/gevent/local.py
Normal file
@@ -0,0 +1,293 @@
|
|||||||
|
"""
|
||||||
|
Greenlet-local objects.
|
||||||
|
|
||||||
|
This module is based on `_threading_local.py`__ from the standard
|
||||||
|
library of Python 3.4.
|
||||||
|
|
||||||
|
__ https://github.com/python/cpython/blob/3.4/Lib/_threading_local.py
|
||||||
|
|
||||||
|
Greenlet-local objects support the management of greenlet-local data.
|
||||||
|
If you have data that you want to be local to a greenlet, simply create
|
||||||
|
a greenlet-local object and use its attributes:
|
||||||
|
|
||||||
|
>>> mydata = local()
|
||||||
|
>>> mydata.number = 42
|
||||||
|
>>> mydata.number
|
||||||
|
42
|
||||||
|
|
||||||
|
You can also access the local-object's dictionary:
|
||||||
|
|
||||||
|
>>> mydata.__dict__
|
||||||
|
{'number': 42}
|
||||||
|
>>> mydata.__dict__.setdefault('widgets', [])
|
||||||
|
[]
|
||||||
|
>>> mydata.widgets
|
||||||
|
[]
|
||||||
|
|
||||||
|
What's important about greenlet-local objects is that their data are
|
||||||
|
local to a greenlet. If we access the data in a different greenlet:
|
||||||
|
|
||||||
|
>>> log = []
|
||||||
|
>>> def f():
|
||||||
|
... items = list(mydata.__dict__.items())
|
||||||
|
... items.sort()
|
||||||
|
... log.append(items)
|
||||||
|
... mydata.number = 11
|
||||||
|
... log.append(mydata.number)
|
||||||
|
>>> greenlet = gevent.spawn(f)
|
||||||
|
>>> greenlet.join()
|
||||||
|
>>> log
|
||||||
|
[[], 11]
|
||||||
|
|
||||||
|
we get different data. Furthermore, changes made in the other greenlet
|
||||||
|
don't affect data seen in this greenlet:
|
||||||
|
|
||||||
|
>>> mydata.number
|
||||||
|
42
|
||||||
|
|
||||||
|
Of course, values you get from a local object, including a __dict__
|
||||||
|
attribute, are for whatever greenlet was current at the time the
|
||||||
|
attribute was read. For that reason, you generally don't want to save
|
||||||
|
these values across greenlets, as they apply only to the greenlet they
|
||||||
|
came from.
|
||||||
|
|
||||||
|
You can create custom local objects by subclassing the local class:
|
||||||
|
|
||||||
|
>>> class MyLocal(local):
|
||||||
|
... number = 2
|
||||||
|
... initialized = False
|
||||||
|
... def __init__(self, **kw):
|
||||||
|
... if self.initialized:
|
||||||
|
... raise SystemError('__init__ called too many times')
|
||||||
|
... self.initialized = True
|
||||||
|
... self.__dict__.update(kw)
|
||||||
|
... def squared(self):
|
||||||
|
... return self.number ** 2
|
||||||
|
|
||||||
|
This can be useful to support default values, methods and
|
||||||
|
initialization. Note that if you define an __init__ method, it will be
|
||||||
|
called each time the local object is used in a separate greenlet. This
|
||||||
|
is necessary to initialize each greenlet's dictionary.
|
||||||
|
|
||||||
|
Now if we create a local object:
|
||||||
|
|
||||||
|
>>> mydata = MyLocal(color='red')
|
||||||
|
|
||||||
|
Now we have a default number:
|
||||||
|
|
||||||
|
>>> mydata.number
|
||||||
|
2
|
||||||
|
|
||||||
|
an initial color:
|
||||||
|
|
||||||
|
>>> mydata.color
|
||||||
|
'red'
|
||||||
|
>>> del mydata.color
|
||||||
|
|
||||||
|
And a method that operates on the data:
|
||||||
|
|
||||||
|
>>> mydata.squared()
|
||||||
|
4
|
||||||
|
|
||||||
|
As before, we can access the data in a separate greenlet:
|
||||||
|
|
||||||
|
>>> log = []
|
||||||
|
>>> greenlet = gevent.spawn(f)
|
||||||
|
>>> greenlet.join()
|
||||||
|
>>> log
|
||||||
|
[[('color', 'red'), ('initialized', True)], 11]
|
||||||
|
|
||||||
|
without affecting this greenlet's data:
|
||||||
|
|
||||||
|
>>> mydata.number
|
||||||
|
2
|
||||||
|
>>> mydata.color
|
||||||
|
Traceback (most recent call last):
|
||||||
|
...
|
||||||
|
AttributeError: 'MyLocal' object has no attribute 'color'
|
||||||
|
|
||||||
|
Note that subclasses can define slots, but they are not greenlet
|
||||||
|
local. They are shared across greenlets::
|
||||||
|
|
||||||
|
>>> class MyLocal(local):
|
||||||
|
... __slots__ = 'number'
|
||||||
|
|
||||||
|
>>> mydata = MyLocal()
|
||||||
|
>>> mydata.number = 42
|
||||||
|
>>> mydata.color = 'red'
|
||||||
|
|
||||||
|
So, the separate greenlet:
|
||||||
|
|
||||||
|
>>> greenlet = gevent.spawn(f)
|
||||||
|
>>> greenlet.join()
|
||||||
|
|
||||||
|
affects what we see:
|
||||||
|
|
||||||
|
>>> mydata.number
|
||||||
|
11
|
||||||
|
|
||||||
|
>>> del mydata
|
||||||
|
|
||||||
|
.. versionchanged:: 1.1a2
|
||||||
|
Update the implementation to match Python 3.4 instead of Python 2.5.
|
||||||
|
This results in locals being eligible for garbage collection as soon
|
||||||
|
as their greenlet exits.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from copy import copy
|
||||||
|
from weakref import ref
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from gevent.hub import getcurrent
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent.lock import RLock
|
||||||
|
|
||||||
|
__all__ = ["local"]
|
||||||
|
|
||||||
|
|
||||||
|
class _wrefdict(dict):
    """A dict that can be weak referenced.

    Plain ``dict`` instances have no ``__weakref__`` slot; this subclass
    exists so ``_localimpl.create_dict`` can take ``ref(self.dicts)``.
    """
|
||||||
|
|
||||||
|
|
||||||
|
class _localimpl(object):
    """A class managing thread-local dicts"""
    # key       -- unique string under which a weakref is stored on owning greenlets
    # dicts     -- weak-referenceable mapping: id(greenlet) -> (wref-or-None, attr dict)
    # localargs -- (args, kwargs) given to local(); assigned by local.__new__
    # locallock -- RLock guarding __init__/__dict__ swaps; assigned by local.__new__
    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'

    def __init__(self):
        # The key used in the Thread objects' attribute dicts.
        # We keep it a string for speed but make it unlikely to clash with
        # a "real" attribute.
        self.key = '_threading_local._localimpl.' + str(id(self))
        # { id(Thread) -> (ref(Thread), thread-local dict) }
        self.dicts = _wrefdict()

    def get_dict(self):
        """Return the dict for the current thread. Raises KeyError if none
        defined."""
        thread = getcurrent()
        return self.dicts[id(thread)][1]

    def create_dict(self):
        """Create a new dict for the current thread, and return it."""
        localdict = {}
        key = self.key
        thread = getcurrent()
        idt = id(thread)

        # If we are working with a gevent.greenlet.Greenlet, we can
        # pro-actively clear out with a link. Use rawlink to avoid
        # spawning any more greenlets
        try:
            rawlink = thread.rawlink
        except AttributeError:
            # Otherwise we need to do it with weak refs
            def local_deleted(_, key=key):
                # When the localimpl is deleted, remove the thread attribute.
                thread = wrthread()
                if thread is not None:
                    del thread.__dict__[key]

            def thread_deleted(_, idt=idt):
                # When the thread is deleted, remove the local dict.
                # Note that this is suboptimal if the thread object gets
                # caught in a reference loop. We would like to be called
                # as soon as the OS-level thread ends instead.
                _local = wrlocal()
                if _local is not None:
                    _local.dicts.pop(idt, None)
            # Cross-referencing weakrefs: whichever of the impl or the
            # thread dies first triggers cleanup of the other side.
            wrlocal = ref(self, local_deleted)
            wrthread = ref(thread, thread_deleted)
            thread.__dict__[key] = wrlocal
        else:
            # Greenlet case: drop our entry as soon as the greenlet finishes.
            wrdicts = ref(self.dicts)

            def clear(_):
                dicts = wrdicts()
                if dicts:
                    dicts.pop(idt, None)
            rawlink(clear)
            wrthread = None

        self.dicts[idt] = wrthread, localdict
        return localdict
|
||||||
|
|
||||||
|
|
||||||
|
@contextmanager
def _patch(self):
    """Temporarily install the current greenlet's private dict as
    ``self.__dict__``.

    On first use in a greenlet, creates that dict and re-runs the
    subclass ``__init__`` (with the arguments originally passed to the
    constructor) under the impl lock; restores the previous ``__dict__``
    on exit.
    """
    impl = object.__getattribute__(self, '_local__impl')
    orig_dct = object.__getattribute__(self, '__dict__')
    try:
        dct = impl.get_dict()
    except KeyError:
        # it's OK to acquire the lock here and not earlier, because the above code won't switch out
        # however, subclassed __init__ might switch, so we do need to acquire the lock here
        dct = impl.create_dict()
        args, kw = impl.localargs
        with impl.locallock:
            self.__init__(*args, **kw)
    with impl.locallock:
        object.__setattr__(self, '__dict__', dct)
        yield
        object.__setattr__(self, '__dict__', orig_dct)
|
||||||
|
|
||||||
|
|
||||||
|
class local(object):
    """
    An object whose attributes are greenlet-local.

    Attribute access is routed through :func:`_patch`, which swaps in the
    per-greenlet ``__dict__`` for the duration of each get/set/delete.
    """
    __slots__ = '_local__impl', '__dict__'

    def __new__(cls, *args, **kw):
        # Constructor arguments are only meaningful if a subclass defines
        # __init__ to consume them (it is re-run per greenlet by _patch).
        if args or kw:
            # NOTE(review): PyPy is compared with == while CPython uses `is`;
            # presumably a PyPy identity quirk for unbound methods -- confirm.
            if (PYPY and cls.__init__ == object.__init__) or (not PYPY and cls.__init__ is object.__init__):
                raise TypeError("Initialization arguments are not supported")
        self = object.__new__(cls)
        impl = _localimpl()
        impl.localargs = (args, kw)
        impl.locallock = RLock()
        object.__setattr__(self, '_local__impl', impl)
        # We need to create the thread dict in anticipation of
        # __init__ being called, to make sure we don't call it
        # again ourselves.
        impl.create_dict()
        return self

    def __getattribute__(self, name):
        with _patch(self):
            return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        # __dict__ is managed exclusively by _patch; direct replacement
        # would corrupt the per-greenlet bookkeeping.
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__setattr__(self, name, value)

    def __delattr__(self, name):
        if name == '__dict__':
            raise AttributeError(
                "%r object attribute '__dict__' is read-only"
                % self.__class__.__name__)
        with _patch(self):
            return object.__delattr__(self, name)

    def __copy__(self):
        """Return a copy whose current-greenlet dict is a shallow copy of
        this object's current-greenlet dict."""
        impl = object.__getattribute__(self, '_local__impl')
        current = getcurrent()
        currentId = id(current)
        d = impl.get_dict()
        duplicate = copy(d)

        cls = type(self)
        if (PYPY and cls.__init__ != object.__init__) or (not PYPY and cls.__init__ is not object.__init__):
            args, kw = impl.localargs
            instance = cls(*args, **kw)
        else:
            instance = cls()

        # Splice the duplicated dict into the fresh instance, keeping the
        # weakref slot the new impl created for this greenlet.
        new_impl = object.__getattribute__(instance, '_local__impl')
        tpl = new_impl.dicts[currentId]
        new_impl.dicts[currentId] = (tpl[0], duplicate)

        return instance
|
||||||
260
python/gevent/lock.py
Normal file
260
python/gevent/lock.py
Normal file
@@ -0,0 +1,260 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""Locking primitives"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
from gevent.hub import getcurrent
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent._semaphore import Semaphore, BoundedSemaphore # pylint:disable=no-name-in-module,import-error
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'Semaphore',
|
||||||
|
'DummySemaphore',
|
||||||
|
'BoundedSemaphore',
|
||||||
|
'RLock',
|
||||||
|
]
|
||||||
|
|
||||||
|
# On PyPy, we don't compile the Semaphore class with Cython. Under
|
||||||
|
# Cython, each individual method holds the GIL for its entire
|
||||||
|
# duration, ensuring that no other thread can interrupt us in an
|
||||||
|
# unsafe state (only when we _do_wait do we call back into Python and
|
||||||
|
# allow switching threads). Simulate that here through the use of a manual
|
||||||
|
# lock. (We use a separate lock for each semaphore to allow sys.settrace functions
|
||||||
|
# to use locks *other* than the one being traced.)
|
||||||
|
if PYPY:
    # On PyPy the Semaphore class is plain Python (no Cython GIL-scoped
    # atomicity), so each Semaphore method must be wrapped in a real OS
    # lock to keep it safe against thread switches.
    # TODO: Need to use monkey.get_original?
    try:
        from _thread import allocate_lock as _allocate_lock # pylint:disable=import-error,useless-suppression
        from _thread import get_ident as _get_ident # pylint:disable=import-error,useless-suppression
    except ImportError:
        # Python 2
        from thread import allocate_lock as _allocate_lock # pylint:disable=import-error,useless-suppression
        from thread import get_ident as _get_ident # pylint:disable=import-error,useless-suppression
    _sem_lock = _allocate_lock()

    def untraceable(f):
        # Don't allow re-entry to these functions in a single thread, as can
        # happen if a sys.settrace is used. Re-entry depth is tracked
        # per native thread in self._locking.
        def wrapper(self):
            me = _get_ident()
            try:
                count = self._locking[me]
            except KeyError:
                count = self._locking[me] = 1
            else:
                count = self._locking[me] = count + 1
            # BUG FIX: this previously read `if count:`, which is true even
            # on the first entry (count == 1), so f() was never executed and
            # the decorated acquire/release were silent no-ops. Only skip
            # the body on actual *re-entry*.
            if count > 1:
                return

            try:
                return f(self)
            finally:
                count = count - 1
                if not count:
                    del self._locking[me]
                else:
                    self._locking[me] = count
        return wrapper

    class _OwnedLock(object):
        # A reentrant lock keyed on the native thread id, built on a raw
        # _thread lock. Not greenlet-aware by design.

        def __init__(self):
            self._owner = None
            self._block = _allocate_lock()
            self._locking = {}
            self._count = 0

        @untraceable
        def acquire(self):
            me = _get_ident()
            if self._owner == me:
                self._count += 1
                return

            self._owner = me
            self._block.acquire()
            self._count = 1

        @untraceable
        def release(self):
            self._count = count = self._count - 1
            if not count:
                self._block.release()
                self._owner = None

    # acquire, wait, and release all acquire the lock on entry and release it
    # on exit. acquire and wait can call _do_wait, which must release it on entry
    # and re-acquire it for them on exit.
    class _around(object):
        """Context manager running *before* on entry and *after* on exit."""
        __slots__ = ('before', 'after')

        def __init__(self, before, after):
            self.before = before
            self.after = after

        def __enter__(self):
            self.before()

        def __exit__(self, t, v, tb):
            self.after()

    def _decorate(func, cmname):
        # Wrap *func* so its body runs inside the context manager found at
        # attribute *cmname* on the instance.
        # functools.wrap?
        def wrapped(self, *args, **kwargs):
            with getattr(self, cmname):
                return func(self, *args, **kwargs)
        return wrapped

    Semaphore._py3k_acquire = Semaphore.acquire = _decorate(Semaphore.acquire, '_lock_locked')
    Semaphore.release = _decorate(Semaphore.release, '_lock_locked')
    Semaphore.wait = _decorate(Semaphore.wait, '_lock_locked')
    Semaphore._do_wait = _decorate(Semaphore._do_wait, '_lock_unlocked')

    _Sem_init = Semaphore.__init__

    def __init__(self, *args, **kwargs):
        # Give every Semaphore its own manual lock plus the two context
        # managers the decorated methods expect.
        l = self._lock_lock = _OwnedLock()
        self._lock_locked = _around(l.acquire, l.release)
        self._lock_unlocked = _around(l.release, l.acquire)

        _Sem_init(self, *args, **kwargs)

    Semaphore.__init__ = __init__

    del _decorate
    del untraceable
|
||||||
|
|
||||||
|
|
||||||
|
class DummySemaphore(object):
    """
    DummySemaphore(value=None) -> DummySemaphore

    A stand-in for :class:`Semaphore` with an effectively infinite initial
    value: no method ever blocks and nothing is counted.

    This lets callers parameterize on whether a resource needs guarding.
    A genuinely limited resource (say a fixed-size thread pool) gets a real
    :class:`Semaphore`; an unbounded one gets an instance of this class,
    and the surrounding code stays identical.

    Likewise it parameterizes mutual exclusion: when the underlying object
    is already thread-safe, a ``DummySemaphore`` suffices; otherwise use a
    real ``Semaphore``.
    """

    # Internally this is used for exactly the purpose described in the
    # documentation. gevent.pool.Pool uses it instead of a Semaphore
    # when the pool size is unlimited, and
    # gevent.fileobject.FileObjectThread takes a parameter that
    # determines whether it should lock around IO to the underlying
    # file object.

    def __init__(self, value=None):
        """
        .. versionchanged:: 1.1rc3
            Accept and ignore a *value* argument for compatibility with Semaphore.
        """

    def __str__(self):
        return '<%s>' % self.__class__.__name__

    def locked(self):
        """A DummySemaphore is never locked so this always returns False."""
        return False

    def release(self):
        """Releasing a dummy semaphore does nothing."""

    def rawlink(self, callback):
        # XXX should still work and notify?
        pass

    def unlink(self, callback):
        pass

    def wait(self, timeout=None):
        """Waiting for a DummySemaphore returns immediately."""

    def acquire(self, blocking=True, timeout=None):
        """
        A DummySemaphore can always be acquired immediately so this always
        returns True and ignores its arguments.

        .. versionchanged:: 1.1a1
            Always return *true*.
        """
        # pylint:disable=unused-argument
        return True

    def __enter__(self):
        pass

    def __exit__(self, typ, val, tb):
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class RLock(object):
    """A greenlet-aware reentrant lock.

    The greenlet holding the lock may :meth:`acquire` it again without
    blocking; each acquire must be matched by a :meth:`release`, and the
    underlying :class:`Semaphore` is only released on the last one.
    """

    def __init__(self):
        # Non-reentrant primitive held while _count > 0.
        self._block = Semaphore(1)
        # Greenlet currently owning the lock, or None.
        self._owner = None
        # Recursion depth of the owner's acquires.
        self._count = 0

    def __repr__(self):
        # NOTE(review): the trailing ')' before '>' looks unintentional but
        # is kept byte-for-byte since repr output may appear in logs/tests.
        return "<%s at 0x%x _block=%s _count=%r _owner=%r)>" % (
            self.__class__.__name__,
            id(self),
            self._block,
            self._count,
            self._owner)

    def acquire(self, blocking=1):
        """Acquire the lock, recursively if already owned by this greenlet.

        :param blocking: passed through to ``Semaphore.acquire`` when the
            lock is not already held by the current greenlet.
        :return: 1 on a recursive acquire; otherwise the semaphore's result.
        """
        me = getcurrent()
        if self._owner is me:
            self._count = self._count + 1
            return 1
        rc = self._block.acquire(blocking)
        if rc:
            self._owner = me
            self._count = 1
        return rc

    def __enter__(self):
        return self.acquire()

    def release(self):
        """Release one level of the lock.

        :raises RuntimeError: if the current greenlet does not own the lock.
        """
        if self._owner is not getcurrent():
            # Fixed typo in the error message ("un-aquired" -> "un-acquired"),
            # matching the standard threading.RLock wording.
            raise RuntimeError("cannot release un-acquired lock")
        self._count = count = self._count - 1
        if not count:
            self._owner = None
            self._block.release()

    def __exit__(self, typ, value, tb):
        self.release()

    # Internal methods used by condition variables

    def _acquire_restore(self, count_owner):
        # Re-acquire the lock and restore (count, owner) saved by
        # _release_save; used by Condition.wait.
        count, owner = count_owner
        self._block.acquire()
        self._count = count
        self._owner = owner

    def _release_save(self):
        # Fully release the lock regardless of recursion depth and return
        # the state needed to restore it.
        count = self._count
        self._count = 0
        owner = self._owner
        self._owner = None
        self._block.release()
        return (count, owner)

    def _is_owned(self):
        return self._owner is getcurrent()
|
||||||
702
python/gevent/monkey.py
Normal file
702
python/gevent/monkey.py
Normal file
@@ -0,0 +1,702 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
# pylint: disable=redefined-outer-name
|
||||||
|
"""
|
||||||
|
Make the standard library cooperative.
|
||||||
|
|
||||||
|
Patching
|
||||||
|
========
|
||||||
|
|
||||||
|
The primary purpose of this module is to carefully patch, in place,
|
||||||
|
portions of the standard library with gevent-friendly functions that
|
||||||
|
behave in the same way as the original (at least as closely as possible).
|
||||||
|
|
||||||
|
The primary interface to this is the :func:`patch_all` function, which
|
||||||
|
performs all the available patches. It accepts arguments to limit the
|
||||||
|
patching to certain modules, but most programs **should** use the
|
||||||
|
default values as they receive the most wide-spread testing, and some monkey
|
||||||
|
patches have dependencies on others.
|
||||||
|
|
||||||
|
Patching **should be done as early as possible** in the lifecycle of the
|
||||||
|
program. For example, the main module (the one that tests against
|
||||||
|
``__main__`` or is otherwise the first imported) should begin with
|
||||||
|
this code, ideally before any other imports::
|
||||||
|
|
||||||
|
from gevent import monkey
|
||||||
|
monkey.patch_all()
|
||||||
|
|
||||||
|
.. tip::
|
||||||
|
|
||||||
|
Some frameworks, such as gunicorn, handle monkey-patching for you.
|
||||||
|
Check their documentation to be sure.
|
||||||
|
|
||||||
|
Querying
|
||||||
|
--------
|
||||||
|
|
||||||
|
Sometimes it is helpful to know if objects have been monkey-patched, and in
|
||||||
|
advanced cases even to have access to the original standard library functions. This
|
||||||
|
module provides functions for that purpose.
|
||||||
|
|
||||||
|
- :func:`is_module_patched`
|
||||||
|
- :func:`is_object_patched`
|
||||||
|
- :func:`get_original`
|
||||||
|
|
||||||
|
Use as a module
|
||||||
|
===============
|
||||||
|
|
||||||
|
Sometimes it is useful to run existing python scripts or modules that
|
||||||
|
were not built to be gevent aware under gevent. To do so, this module
|
||||||
|
can be run as the main module, passing the script and its arguments.
|
||||||
|
For details, see the :func:`main` function.
|
||||||
|
|
||||||
|
Functions
|
||||||
|
=========
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import print_function
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'patch_all',
|
||||||
|
'patch_builtins',
|
||||||
|
'patch_dns',
|
||||||
|
'patch_os',
|
||||||
|
'patch_select',
|
||||||
|
'patch_signal',
|
||||||
|
'patch_socket',
|
||||||
|
'patch_ssl',
|
||||||
|
'patch_subprocess',
|
||||||
|
'patch_sys',
|
||||||
|
'patch_thread',
|
||||||
|
'patch_time',
|
||||||
|
# query functions
|
||||||
|
'get_original',
|
||||||
|
'is_module_patched',
|
||||||
|
'is_object_patched',
|
||||||
|
# module functions
|
||||||
|
'main',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info[0] >= 3:
|
||||||
|
string_types = (str,)
|
||||||
|
PY3 = True
|
||||||
|
else:
|
||||||
|
import __builtin__ # pylint:disable=import-error
|
||||||
|
string_types = (__builtin__.basestring,)
|
||||||
|
PY3 = False
|
||||||
|
|
||||||
|
WIN = sys.platform.startswith("win")
|
||||||
|
|
||||||
|
# maps module name -> {attribute name: original item}
|
||||||
|
# e.g. "time" -> {"sleep": built-in function sleep}
|
||||||
|
saved = {}
|
||||||
|
|
||||||
|
|
||||||
|
def is_module_patched(modname):
    """Check if a module has been replaced with a cooperative version.

    :param modname: standard module name, e.g. ``'socket'``.
    :return: True if any attribute of that module has been recorded in
        ``saved`` by a patching function.
    """
    return modname in saved
|
||||||
|
|
||||||
|
|
||||||
|
def is_object_patched(modname, objname):
    """Check if an object in a module has been replaced with a cooperative version.

    :param modname: standard module name, e.g. ``'socket'``.
    :param objname: attribute name within that module, e.g. ``'socket'``.
    :return: True if that specific attribute's original was saved.
    """
    return is_module_patched(modname) and objname in saved[modname]
|
||||||
|
|
||||||
|
|
||||||
|
def _get_original(name, items):
    """Return the pre-patch values of the attributes *items* of module *name*.

    Saved originals are preferred; anything never patched is read straight
    off the (lazily imported) module.
    """
    originals = saved.get(name, {})
    module = None
    result = []
    for attr in items:
        try:
            result.append(originals[attr])
        except KeyError:
            # Not patched: import the module once and read the live value.
            if module is None:
                module = __import__(name)
            result.append(getattr(module, attr))
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def get_original(mod_name, item_name):
    """Retrieve the original object from a module.

    If the object has not been patched, then that object will still be retrieved.

    :param item_name: A string or sequence of strings naming the attribute(s) on the module
        ``mod_name`` to return.
    :return: The original value if a string was given for ``item_name`` or a sequence
        of original values if a sequence was passed.
    """
    if not isinstance(item_name, string_types):
        return _get_original(mod_name, item_name)
    # Single-name form: unwrap the one-element result list.
    return _get_original(mod_name, [item_name])[0]
|
||||||
|
|
||||||
|
_NONE = object()
|
||||||
|
|
||||||
|
|
||||||
|
def patch_item(module, attr, newitem):
    """Set ``module.attr`` to *newitem*, recording the original value.

    Only the first pre-patch value is remembered in ``saved``; patching
    the same attribute twice does not clobber the recorded original.
    """
    previous = getattr(module, attr, _NONE)
    if previous is not _NONE:
        per_module = saved.setdefault(module.__name__, {})
        per_module.setdefault(attr, previous)
    setattr(module, attr, newitem)
|
||||||
|
|
||||||
|
|
||||||
|
def remove_item(module, attr):
    """Delete ``module.attr``, recording the original value in ``saved``.

    A missing attribute is silently ignored.
    """
    previous = getattr(module, attr, _NONE)
    if previous is _NONE:
        return
    per_module = saved.setdefault(module.__name__, {})
    per_module.setdefault(attr, previous)
    delattr(module, attr)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_module(name, items=None):
    """Replace attributes of a standard module with the versions from
    ``gevent.<name>``.

    :param items: iterable of attribute names to copy over; defaults to
        the gevent module's ``__implements__`` list.
    :return: the (now patched) target module.
    :raises AttributeError: if *items* is None and the gevent module does
        not declare ``__implements__``.
    """
    gevent_module = getattr(__import__('gevent.' + name), name)
    # A gevent module may patch a differently-named stdlib module via __target__.
    target_name = getattr(gevent_module, '__target__', name)
    target_module = __import__(target_name)
    if items is None:
        items = getattr(gevent_module, '__implements__', None)
        if items is None:
            raise AttributeError('%r does not have __implements__' % gevent_module)
    for attr in items:
        patch_item(target_module, attr, getattr(gevent_module, attr))
    return target_module
|
||||||
|
|
||||||
|
|
||||||
|
def _queue_warning(message, _warnings):
    """Queue *message* to be shown after monkey-patching completes.

    Queuing (rather than warning immediately) avoids extra imports while
    patching is in progress. When no queue is being collected -- a one-off
    call, which is unusual -- the warning is emitted right away.
    """
    if _warnings is not None:
        _warnings.append(message)
    else:
        _process_warnings([message])
|
||||||
|
|
||||||
|
|
||||||
|
def _process_warnings(_warnings):
|
||||||
|
import warnings
|
||||||
|
for warning in _warnings:
|
||||||
|
warnings.warn(warning, RuntimeWarning, stacklevel=3)
|
||||||
|
|
||||||
|
|
||||||
|
def _patch_sys_std(name):
    """Replace ``sys.<name>`` with a ``FileObjectThread`` wrapper around it.

    Idempotent: does nothing if the stream is already wrapped.
    """
    from gevent.fileobject import FileObjectThread
    orig = getattr(sys, name)
    if not isinstance(orig, FileObjectThread):
        patch_item(sys, name, FileObjectThread(orig))
|
||||||
|
|
||||||
|
|
||||||
|
def patch_sys(stdin=True, stdout=True, stderr=True):
    """Patch sys.std[in,out,err] to use a cooperative IO via a threadpool.

    This is relatively dangerous and can have unintended consequences such as hanging
    the process or `misinterpreting control keys`_ when ``input`` and ``raw_input``
    are used.

    This method does nothing on Python 3. The Python 3 interpreter wants to flush
    the TextIOWrapper objects that make up stderr/stdout at shutdown time, but
    using a threadpool at that time leads to a hang.

    :param stdin: patch ``sys.stdin`` (default True).
    :param stdout: patch ``sys.stdout`` (default True).
    :param stderr: patch ``sys.stderr`` (default True).

    .. _`misinterpreting control keys`: https://github.com/gevent/gevent/issues/274
    """
    # test__issue6.py demonstrates the hang if these lines are removed;
    # strangely enough that test passes even without monkey-patching sys
    if PY3:
        return

    if stdin:
        _patch_sys_std('stdin')
    if stdout:
        _patch_sys_std('stdout')
    if stderr:
        _patch_sys_std('stderr')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_os():
    """
    Replace :func:`os.fork` with :func:`gevent.fork`, and, on POSIX,
    :func:`os.waitpid` with :func:`gevent.os.waitpid` (if the
    environment variable ``GEVENT_NOWAITPID`` is not defined). Does
    nothing if fork is not available.

    .. caution:: This method must be used with :func:`patch_signal` to have proper SIGCHLD
         handling and thus correct results from ``waitpid``.
         :func:`patch_all` calls both by default.

    .. caution:: For SIGCHLD handling to work correctly, the event loop must run.
         The easiest way to help ensure this is to use :func:`patch_all`.
    """
    # gevent.os declares which names to substitute via its __implements__.
    patch_module('os')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_time():
    """Replace :func:`time.sleep` with :func:`gevent.sleep`."""
    import time
    from gevent.hub import sleep
    patch_item(time, 'sleep', sleep)
|
||||||
|
|
||||||
|
|
||||||
|
def _patch_existing_locks(threading):
    """Rewrite the owner of every currently-held RLock/ModuleLock to the
    current greenlet id, so they can still be released after patching.

    Only runs while the process is still single-threaded; otherwise the
    ownership rewrite would be unsound.
    """
    if len(list(threading.enumerate())) != 1:
        return
    try:
        tid = threading.get_ident()
    except AttributeError:
        # Python 2 spelling.
        tid = threading._get_ident()
    rlock_type = type(threading.RLock())
    try:
        import importlib._bootstrap
    except ImportError:
        # No importlib._bootstrap: use a dummy class no object will match.
        class _ModuleLock(object):
            pass
    else:
        _ModuleLock = importlib._bootstrap._ModuleLock # python 2 pylint: disable=no-member
    # It might be possible to walk up all the existing stack frames to find
    # locked objects...at least if they use `with`. To be sure, we look at every object
    # Since we're supposed to be done very early in the process, there shouldn't be
    # too many.

    # By definition there's only one thread running, so the various
    # owner attributes were the old (native) thread id. Make it our
    # current greenlet id so that when it wants to unlock and compare
    # self.__owner with _get_ident(), they match.
    gc = __import__('gc')
    for o in gc.get_objects():
        if isinstance(o, rlock_type):
            if hasattr(o, '_owner'): # Py3
                if o._owner is not None:
                    o._owner = tid
            else:
                # Py2 stores the owner in the name-mangled _RLock__owner.
                if o._RLock__owner is not None:
                    o._RLock__owner = tid
        elif isinstance(o, _ModuleLock):
            if o.owner is not None:
                o.owner = tid
|
||||||
|
|
||||||
|
|
||||||
|
def patch_thread(threading=True, _threading_local=True, Event=False, logging=True,
                 existing_locks=True,
                 _warnings=None):
    """
    Replace the standard :mod:`thread` module to make it greenlet-based.

    - If *threading* is true (the default), also patch ``threading``.
    - If *_threading_local* is true (the default), also patch ``_threading_local.local``.
    - If *logging* is True (the default), also patch locks taken if the logging module has
      been configured.
    - If *existing_locks* is True (the default), and the process is still single threaded,
      make sure than any :class:`threading.RLock` (and, under Python 3, :class:`importlib._bootstrap._ModuleLock`)
      instances that are currently locked can be properly unlocked.

    .. caution::
        Monkey-patching :mod:`thread` and using
        :class:`multiprocessing.Queue` or
        :class:`concurrent.futures.ProcessPoolExecutor` (which uses a
        ``Queue``) will hang the process.

    .. versionchanged:: 1.1b1
        Add *logging* and *existing_locks* params.
    """
    # XXX: Simplify
    # pylint:disable=too-many-branches,too-many-locals

    # Description of the hang:
    # There is an incompatibility with patching 'thread' and the 'multiprocessing' module:
    # The problem is that multiprocessing.queues.Queue uses a half-duplex multiprocessing.Pipe,
    # which is implemented with os.pipe() and _multiprocessing.Connection. os.pipe isn't patched
    # by gevent, as it returns just a fileno. _multiprocessing.Connection is an internal implementation
    # class implemented in C, which exposes a 'poll(timeout)' method; under the covers, this issues a
    # (blocking) select() call: hence the need for a real thread. Except for that method, we could
    # almost replace Connection with gevent.fileobject.SocketAdapter, plus a trivial
    # patch to os.pipe (below). Sigh, so close. (With a little work, we could replicate that method)

    # import os
    # import fcntl
    # os_pipe = os.pipe
    # def _pipe():
    #   r, w = os_pipe()
    #   fcntl.fcntl(r, fcntl.F_SETFL, os.O_NONBLOCK)
    #   fcntl.fcntl(w, fcntl.F_SETFL, os.O_NONBLOCK)
    #   return r, w
    # os.pipe = _pipe

    # The 'threading' module copies some attributes from the
    # thread module the first time it is imported. If we patch 'thread'
    # before that happens, then we store the wrong values in 'saved',
    # So if we're going to patch threading, we either need to import it
    # before we patch thread, or manually clean up the attributes that
    # are in trouble. The latter is tricky because of the different names
    # on different versions.
    if threading:
        threading_mod = __import__('threading')
        # Capture the *real* current thread object before
        # we start returning DummyThread objects, for comparison
        # to the main thread.
        orig_current_thread = threading_mod.current_thread()
    else:
        threading_mod = None
        orig_current_thread = None

    patch_module('thread')

    if threading:
        patch_module('threading')

        if Event:
            from gevent.event import Event
            patch_item(threading_mod, 'Event', Event)

        if existing_locks:
            _patch_existing_locks(threading_mod)

        if logging and 'logging' in sys.modules:
            # Only touch logging if it was already imported/configured.
            logging = __import__('logging')
            patch_item(logging, '_lock', threading_mod.RLock())
            for wr in logging._handlerList:
                # In py26, these are actual handlers, not weakrefs
                handler = wr() if callable(wr) else wr
                if handler is None:
                    continue
                if not hasattr(handler, 'lock'):
                    raise TypeError("Unknown/unsupported handler %r" % handler)
                # Replace the native lock with a greenlet-aware RLock.
                handler.lock = threading_mod.RLock()

    if _threading_local:
        _threading_local = __import__('_threading_local')
        from gevent.local import local
        patch_item(_threading_local, 'local', local)

    def make_join_func(thread, thread_greenlet):
        # Build a cooperative replacement for Thread.join that polls
        # is_alive() with a green sleep instead of blocking natively.
        from gevent.hub import sleep
        from time import time

        def join(timeout=None):
            end = None
            if threading_mod.current_thread() is thread:
                raise RuntimeError("Cannot join current thread")
            if thread_greenlet is not None and thread_greenlet.dead:
                return
            if not thread.is_alive():
                return

            if timeout:
                end = time() + timeout

            while thread.is_alive():
                if end is not None and time() > end:
                    return
                sleep(0.01)
        return join

    if threading:
        from gevent.threading import main_native_thread

        for thread in threading_mod._active.values():
            if thread == main_native_thread():
                continue
            thread.join = make_join_func(thread, None)

        if sys.version_info[:2] >= (3, 4):

            # Issue 18808 changes the nature of Thread.join() to use
            # locks. This means that a greenlet spawned in the main thread
            # (which is already running) cannot wait for the main thread---it
            # hangs forever. We patch around this if possible. See also
            # gevent.threading.
            greenlet = __import__('greenlet')

            if orig_current_thread == threading_mod.main_thread():
                main_thread = threading_mod.main_thread()
                _greenlet = main_thread._greenlet = greenlet.getcurrent()

                main_thread.join = make_join_func(main_thread, _greenlet)

                # Patch up the ident of the main thread to match. This
                # matters if threading was imported before monkey-patching
                # thread
                oldid = main_thread.ident
                main_thread._ident = threading_mod.get_ident()
                if oldid in threading_mod._active:
                    threading_mod._active[main_thread.ident] = threading_mod._active[oldid]
                if oldid != main_thread.ident:
                    del threading_mod._active[oldid]
            else:
                _queue_warning("Monkey-patching not on the main thread; "
                               "threading.main_thread().join() will hang from a greenlet",
                               _warnings)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_socket(dns=True, aggressive=True):
    """Replace the standard socket object with gevent's cooperative sockets.

    If ``dns`` is true, also patch dns functions in :mod:`socket`.
    """
    from gevent import socket
    # 'create_connection' is patched along with the rest even though it
    # would pick up the green getaddrinfo/socket anyway: the pure-Python
    # gevent.socket.socket.connect keeps the raised exception's frame
    # alive, which would otherwise break a later bind(source_address).
    implemented = socket.__implements__  # pylint:disable=no-member
    if dns:
        to_patch = implemented
    else:
        to_patch = set(implemented) - set(socket.__dns__)  # pylint:disable=no-member
    patch_module('socket', items=to_patch)
    if aggressive and 'ssl' not in implemented:
        remove_item(socket, 'ssl')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_dns():
    """Replace DNS functions in :mod:`socket` with cooperative versions.

    This is only useful if :func:`patch_socket` has been called and is done automatically
    by that method if requested.
    """
    from gevent import socket as green_socket
    dns_items = green_socket.__dns__  # pylint:disable=no-member
    patch_module('socket', items=dns_items)
|
||||||
|
|
||||||
|
|
||||||
|
def patch_ssl():
    """Replace the SSLSocket object and the socket-wrapping functions of
    :mod:`ssl` with cooperative versions.

    This is only useful if :func:`patch_socket` has been called.
    """
    patch_module('ssl')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_select(aggressive=True):
    """
    Replace :func:`select.select` with :func:`gevent.select.select`
    and :func:`select.poll` with :class:`gevent.select.poll` (where available).

    If ``aggressive`` is true (the default), also remove other
    blocking functions from :mod:`select` and (on Python 3.4 and
    above) :mod:`selectors`:

    - :func:`select.epoll`
    - :func:`select.kqueue`
    - :func:`select.kevent`
    - :func:`select.devpoll` (Python 3.5+)
    - :class:`selectors.EpollSelector`
    - :class:`selectors.KqueueSelector`
    - :class:`selectors.DevpollSelector` (Python 3.5+)
    """
    patch_module('select')
    if aggressive:
        select = __import__('select')
        # since these are blocking we're removing them here. This makes some other
        # modules (e.g. asyncore) non-blocking, as they use select that we provide
        # when none of these are available.
        remove_item(select, 'epoll')
        remove_item(select, 'kqueue')
        remove_item(select, 'kevent')
        remove_item(select, 'devpoll')

    if sys.version_info[:2] >= (3, 4):
        # Python 3 wants to use `select.select` as a member function,
        # leading to this error in selectors.py (because gevent.select.select is
        # not a builtin and doesn't get the magic auto-static that they do)
        #    r, w, _ = self._select(self._readers, self._writers, [], timeout)
        #    TypeError: select() takes from 3 to 4 positional arguments but 5 were given
        # Note that this obviously only happens if selectors was imported after we had patched
        # select; but there is a code path that leads to it being imported first (but now we've
        # patched select---so we can't compare them identically)
        select = __import__('select') # Should be gevent-patched now
        orig_select_select = get_original('select', 'select')
        assert select.select is not orig_select_select
        selectors = __import__('selectors')
        if selectors.SelectSelector._select in (select.select, orig_select_select):
            # Wrap the green select so it works as an unbound method
            # (dropping the implicit `self`).
            def _select(self, *args, **kwargs): # pylint:disable=unused-argument
                return select.select(*args, **kwargs)
            selectors.SelectSelector._select = _select
            # Mark the wrapper so un-patching/diagnostic code can spot it.
            _select._gevent_monkey = True

        if aggressive:
            # If `selectors` had already been imported before we removed
            # select.epoll|kqueue|devpoll, these may have been defined in terms
            # of those functions. They'll fail at runtime.
            remove_item(selectors, 'EpollSelector')
            remove_item(selectors, 'KqueueSelector')
            remove_item(selectors, 'DevpollSelector')
            selectors.DefaultSelector = selectors.SelectSelector
|
||||||
|
|
||||||
|
|
||||||
|
def patch_subprocess():
    """
    Replace :func:`subprocess.call`, :func:`subprocess.check_call`,
    :func:`subprocess.check_output` and :class:`subprocess.Popen` with
    :mod:`cooperative versions <gevent.subprocess>`.

    .. note::
       On Windows under Python 3, the API support may not completely
       match the standard library.
    """
    patch_module('subprocess')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_builtins():
    """
    Make the builtin __import__ function `greenlet safe`_ under Python 2.

    .. note::
       This does nothing under Python 3 as it is not necessary. Python 3 features
       improved import locks that are per-module, not global.

    .. _greenlet safe: https://github.com/gevent/gevent/issues/108

    """
    if sys.version_info[:2] >= (3, 3):
        # Python 3.3+ has per-module import locks; nothing to patch.
        return
    patch_module('builtins')
|
||||||
|
|
||||||
|
|
||||||
|
def patch_signal():
    """
    Make the signal.signal function work with a monkey-patched os.

    .. caution:: This method must be used with :func:`patch_os` to have proper SIGCHLD
         handling. :func:`patch_all` calls both by default.

    .. caution:: For proper SIGCHLD handling, you must yield to the event loop.
         Using :func:`patch_all` is the easiest way to ensure this.

    .. seealso:: :mod:`gevent.signal`
    """
    patch_module("signal")
|
||||||
|
|
||||||
|
|
||||||
|
def _check_repatching(**module_settings):
    """Track patch_all settings across calls and warn on changes.

    Returns ``(warnings, first_time)``: a list of queued warning messages
    and a flag that is True only on the very first invocation.
    """
    key = '_gevent_saved_patch_all'
    _warnings = []
    previous = saved.get(key, module_settings)
    if previous != module_settings:
        _queue_warning("Patching more than once will result in the union of all True"
                       " parameters being patched",
                       _warnings)

    # Must be computed before we store the new settings below.
    first_time = key not in saved
    saved[key] = module_settings
    return _warnings, first_time
|
||||||
|
|
||||||
|
|
||||||
|
def patch_all(socket=True, dns=True, time=True, select=True, thread=True, os=True, ssl=True, httplib=False,
              subprocess=True, sys=False, aggressive=True, Event=False,
              builtins=True, signal=True):
    """
    Do all of the default monkey patching (calls every other applicable
    function in this module).

    .. versionchanged:: 1.1
       Issue a :mod:`warning <warnings>` if this function is called multiple times
       with different arguments. The second and subsequent calls will only add more
       patches, they can never remove existing patches by setting an argument to ``False``.
    .. versionchanged:: 1.1
       Issue a :mod:`warning <warnings>` if this function is called with ``os=False``
       and ``signal=True``. This will cause SIGCHLD handlers to not be called. This may
       be an error in the future.
    """
    # pylint:disable=too-many-locals,too-many-branches

    # Check to see if they're changing the patched list
    _warnings, first_time = _check_repatching(**locals())
    if not _warnings and not first_time:
        # Nothing to do, identical args to what we just
        # did
        return

    # order is important
    if os:
        patch_os()
    if time:
        patch_time()
    if thread:
        patch_thread(Event=Event, _warnings=_warnings)
    # sys must be patched after thread. in other cases threading._shutdown will be
    # initiated to _MainThread with real thread ident
    if sys:
        patch_sys()
    if socket:
        patch_socket(dns=dns, aggressive=aggressive)
    if select:
        patch_select(aggressive=aggressive)
    if ssl:
        patch_ssl()
    if httplib:
        # httplib patching was removed; keep the parameter for
        # backwards compatibility but reject a True value loudly.
        raise ValueError('gevent.httplib is no longer provided, httplib must be False')
    if subprocess:
        patch_subprocess()
    if builtins:
        patch_builtins()
    if signal:
        if not os:
            _queue_warning('Patching signal but not os will result in SIGCHLD handlers'
                           ' installed after this not being called and os.waitpid may not'
                           ' function correctly if gevent.subprocess is used. This may raise an'
                           ' error in the future.',
                           _warnings)
        patch_signal()

    # Emit any warnings queued during patching, all at once.
    _process_warnings(_warnings)
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Command-line entry point: parse ``--[no-]module`` options, call
    :func:`patch_all` accordingly, then exec the given script (if any)
    in this module's globals."""
    args = {}
    argv = sys.argv[1:]
    verbose = False
    script_help, patch_all_args, modules = _get_script_help()
    # Consume leading --options; everything after them is the script + its args.
    while argv and argv[0].startswith('--'):
        option = argv[0][2:]
        if option == 'verbose':
            verbose = True
        elif option.startswith('no-') and option.replace('no-', '') in patch_all_args:
            # --no-thread etc.: explicitly disable one patch target.
            args[option[3:]] = False
        elif option in patch_all_args:
            args[option] = True
            if option in modules:
                # Naming one module opts in to exclusive mode: all other
                # modules default to unpatched.
                for module in modules:
                    args.setdefault(module, False)
        else:
            sys.exit(script_help + '\n\n' + 'Cannot patch %r' % option)
        del argv[0]
        # TODO: break on --
    if verbose:
        import pprint
        import os
        print('gevent.monkey.patch_all(%s)' % ', '.join('%s=%s' % item for item in args.items()))
        print('sys.version=%s' % (sys.version.strip().replace('\n', ' '), ))
        print('sys.path=%s' % pprint.pformat(sys.path))
        print('sys.modules=%s' % pprint.pformat(sorted(sys.modules.keys())))
        print('cwd=%s' % os.getcwd())

    patch_all(**args)
    if argv:
        # Make the target script look like it was invoked directly.
        sys.argv = argv
        __package__ = None
        assert __package__ is None
        globals()['__file__'] = sys.argv[0]  # issue #302
        globals()['__package__'] = None  # issue #975: make script be its own package
        with open(sys.argv[0]) as f:
            # Be sure to exec in globals to avoid import pollution. Also #975.
            exec(f.read(), globals())
    else:
        print(script_help)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_script_help():
    """Build the command-line usage text for :func:`main`.

    :return: A tuple ``(script_help, patch_all_args, modules)`` where
        *patch_all_args* is the list of :func:`patch_all` parameter names
        and *modules* is the subset that has a matching ``patch_<name>``
        function in this module.
    """
    # inspect.getargspec was deprecated in Python 3.0 and removed in 3.11;
    # getfullargspec returns the argument names as its first element too.
    try:
        from inspect import getfullargspec as _argspec
    except ImportError:  # Python 2
        from inspect import getargspec as _argspec  # pylint:disable=deprecated-method
    patch_all_args = _argspec(patch_all)[0]
    modules = [x for x in patch_all_args if 'patch_' + x in globals()]
    script_help = """gevent.monkey - monkey patch the standard modules to use gevent.

USAGE: python -m gevent.monkey [MONKEY OPTIONS] script [SCRIPT OPTIONS]

If no OPTIONS present, monkey patches all the modules it can patch.
You can exclude a module with --no-module, e.g. --no-thread. You can
specify a module to patch with --module, e.g. --socket. In the latter
case only the modules specified on the command line will be patched.

MONKEY OPTIONS: --verbose %s""" % ', '.join('--[no-]%s' % m for m in modules)
    return script_help, patch_all_args, modules
|
||||||
|
|
||||||
|
# Expose the generated usage text via main's docstring so help()/pydoc
# show the available command-line options.
main.__doc__ = _get_script_help()[0]

if __name__ == '__main__':
    main()
|
||||||
468
python/gevent/os.py
Normal file
468
python/gevent/os.py
Normal file
@@ -0,0 +1,468 @@
|
|||||||
|
"""
|
||||||
|
Low-level operating system functions from :mod:`os`.
|
||||||
|
|
||||||
|
Cooperative I/O
|
||||||
|
===============
|
||||||
|
|
||||||
|
This module provides cooperative versions of :func:`os.read` and
|
||||||
|
:func:`os.write`. These functions are *not* monkey-patched; you
|
||||||
|
must explicitly call them or monkey patch them yourself.
|
||||||
|
|
||||||
|
POSIX functions
|
||||||
|
---------------
|
||||||
|
|
||||||
|
On POSIX, non-blocking IO is available.
|
||||||
|
|
||||||
|
- :func:`nb_read`
|
||||||
|
- :func:`nb_write`
|
||||||
|
- :func:`make_nonblocking`
|
||||||
|
|
||||||
|
All Platforms
|
||||||
|
-------------
|
||||||
|
|
||||||
|
On non-POSIX platforms (e.g., Windows), non-blocking IO is not
|
||||||
|
available. On those platforms (and on POSIX), cooperative IO can
|
||||||
|
be done with the threadpool.
|
||||||
|
|
||||||
|
- :func:`tp_read`
|
||||||
|
- :func:`tp_write`
|
||||||
|
|
||||||
|
Child Processes
|
||||||
|
===============
|
||||||
|
|
||||||
|
The functions :func:`fork` and (on POSIX) :func:`forkpty` and :func:`waitpid` can be used
|
||||||
|
to manage child processes.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
|
||||||
|
Forking a process that uses greenlets does not eliminate all non-running
|
||||||
|
greenlets. Any that were scheduled in the hub of the forking thread in the parent
|
||||||
|
remain scheduled in the child; compare this to how normal threads operate. (This behaviour
|
||||||
|
    may change in a subsequent major release.)
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from gevent.hub import get_hub, reinit
|
||||||
|
from gevent._compat import PY3
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
import errno
|
||||||
|
|
||||||
|
# EAGAIN is missing on some platforms; 11 is its usual value.
EAGAIN = getattr(errno, 'EAGAIN', 11)

try:
    import fcntl
except ImportError:
    # fcntl is POSIX-only (absent on Windows); the nb_* API is then unavailable.
    fcntl = None

# Names this module provides as replacements for the stdlib `os` module.
__implements__ = ['fork']
# Extra, gevent-specific names (not part of stdlib `os`).
__extensions__ = ['tp_read', 'tp_write']

# The real (blocking) primitives, saved before any monkey-patching.
_read = os.read
_write = os.write


# errno values that mean "retry" rather than "fail" for non-blocking IO.
ignored_errors = [EAGAIN, errno.EINTR]
|
||||||
|
|
||||||
|
|
||||||
|
if fcntl:

    __extensions__ += ['make_nonblocking', 'nb_read', 'nb_write']

    def make_nonblocking(fd):
        """Put the file descriptor *fd* into non-blocking mode if possible.

        :return: A boolean value that evaluates to True if successful."""
        flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
        if not bool(flags & os.O_NONBLOCK):
            fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        return True

    def nb_read(fd, n):
        """Read up to `n` bytes from file descriptor `fd`. Return a string
        containing the bytes read. If end-of-file is reached, an empty string
        is returned.

        The descriptor must be in non-blocking mode.
        """
        # Retry loop: on EAGAIN/EINTR, register an io watcher (1 == read
        # event) and yield to the hub until the fd is readable again.
        hub, event = None, None
        while True:
            try:
                return _read(fd, n)
            except OSError as e:
                if e.errno not in ignored_errors:
                    raise
                if not PY3:
                    sys.exc_clear()
            if hub is None:
                # Create the watcher lazily, only on the first would-block.
                hub = get_hub()
                event = hub.loop.io(fd, 1)
            hub.wait(event)

    def nb_write(fd, buf):
        """Write bytes from buffer `buf` to file descriptor `fd`. Return the
        number of bytes written.

        The file descriptor must be in non-blocking mode.
        """
        # Same retry structure as nb_read, but waiting for writability
        # (2 == write event).
        hub, event = None, None
        while True:
            try:
                return _write(fd, buf)
            except OSError as e:
                if e.errno not in ignored_errors:
                    raise
                if not PY3:
                    sys.exc_clear()
            if hub is None:
                hub = get_hub()
                event = hub.loop.io(fd, 2)
            hub.wait(event)
|
||||||
|
|
||||||
|
|
||||||
|
def tp_read(fd, n):
    """Read up to *n* bytes from file descriptor *fd*. Return a string
    containing the bytes read. If end-of-file is reached, an empty string
    is returned.

    Reading is done using the threadpool.
    """
    pool = get_hub().threadpool
    return pool.apply(_read, (fd, n))
|
||||||
|
|
||||||
|
|
||||||
|
def tp_write(fd, buf):
    """Write bytes from buffer *buf* to file descriptor *fd*. Return the
    number of bytes written.

    Writing is done using the threadpool.
    """
    pool = get_hub().threadpool
    return pool.apply(_write, (fd, buf))
|
||||||
|
|
||||||
|
|
||||||
|
if hasattr(os, 'fork'):
|
||||||
|
# pylint:disable=function-redefined,redefined-outer-name
|
||||||
|
|
||||||
|
_raw_fork = os.fork
|
||||||
|
|
||||||
|
    def fork_gevent():
        """
        Forks the process using :func:`os.fork` and prepares the
        child process to continue using gevent before returning.

        .. note::

            The PID returned by this function may not be waitable with
            either the original :func:`os.waitpid` or this module's
            :func:`waitpid` and it may not generate SIGCHLD signals if
            libev child watchers are or ever have been in use. For
            example, the :mod:`gevent.subprocess` module uses libev
            child watchers (which parts of gevent use libev child
            watchers is subject to change at any time). Most
            applications should use :func:`fork_and_watch`, which is
            monkey-patched as the default replacement for
            :func:`os.fork` and implements the ``fork`` function of
            this module by default, unless the environment variable
            ``GEVENT_NOWAITPID`` is defined before this module is
            imported.

        .. versionadded:: 1.1b2
        """
        result = _raw_fork()
        # result == 0 means we are the child; only the child needs to
        # re-initialize gevent's hub/loop after the fork.
        if not result:
            reinit()
        return result
|
||||||
|
|
||||||
|
def fork():
|
||||||
|
"""
|
||||||
|
A wrapper for :func:`fork_gevent` for non-POSIX platforms.
|
||||||
|
"""
|
||||||
|
return fork_gevent()
|
||||||
|
|
||||||
|
    if hasattr(os, 'forkpty'):
        # Save the real forkpty before any patching.
        _raw_forkpty = os.forkpty

        def forkpty_gevent():
            """
            Forks the process using :func:`os.forkpty` and prepares the
            child process to continue using gevent before returning.

            Returns a tuple (pid, master_fd). The `master_fd` is *not* put into
            non-blocking mode.

            Availability: Some Unix systems.

            .. seealso:: This function has the same limitations as :func:`fork_gevent`.

            .. versionadded:: 1.1b5
            """
            pid, master_fd = _raw_forkpty()
            # pid == 0 in the child; only the child re-initializes gevent.
            if not pid:
                reinit()
            return pid, master_fd

        forkpty = forkpty_gevent

        __implements__.append('forkpty')
        __extensions__.append("forkpty_gevent")
|
||||||
|
|
||||||
|
    if hasattr(os, 'WNOWAIT') or hasattr(os, 'WNOHANG'):
        # We can only do this on POSIX
        import time

        # The real (blocking) waitpid and the WNOHANG flag, saved
        # before any monkey-patching.
        _waitpid = os.waitpid
        _WNOHANG = os.WNOHANG

        # replaced by the signal module.
        _on_child_hook = lambda: None

        # {pid -> watcher or tuple(pid, rstatus, timestamp)}
        # A watcher means the child is still running; a tuple means it
        # finished and records the result plus when it finished.
        _watched_children = {}
|
||||||
|
|
||||||
|
        def _on_child(watcher, callback):
            """Libev child-watcher callback: record the child's exit status
            and notify interested parties."""
            # XXX: Could handle tracing here by not stopping
            # until the pid is terminated
            watcher.stop()
            # Replace the watcher with its final (pid, status, timestamp)
            # so waitpid() can return it later.
            _watched_children[watcher.pid] = (watcher.pid, watcher.rstatus, time.time())
            if callback:
                callback(watcher)
            # dispatch an "event"; used by gevent.signal.signal
            _on_child_hook()
            # now is as good a time as any to reap children
            _reap_children()
|
||||||
|
|
||||||
|
        def _reap_children(timeout=60):
            """Drop finished children that nobody waited on within *timeout* seconds."""
            # Remove all the dead children that haven't been waited on
            # for the *timeout* seconds.
            # Some platforms queue delivery of SIGCHLD for all children that die;
            # in that case, a well-behaved application should call waitpid() for each
            # signal.
            # Some platforms (linux) only guarantee one delivery if multiple children
            # die. On that platform, the well-behave application calls waitpid() in a loop
            # until it gets back -1, indicating no more dead children need to be waited for.
            # In either case, waitpid should be called the same number of times as dead children,
            # thus removing all the watchers when a SIGCHLD arrives. The (generous) timeout
            # is to work with applications that neglect to call waitpid and prevent "unlimited"
            # growth.
            # Note that we don't watch for the case of pid wraparound. That is, we fork a new
            # child with the same pid as an existing watcher, but the child is already dead,
            # just not waited on yet.
            now = time.time()
            oldest_allowed = now - timeout
            # Only tuples represent finished children; live watchers are kept.
            dead = [pid for pid, val
                    in _watched_children.items()
                    if isinstance(val, tuple) and val[2] < oldest_allowed]
            for pid in dead:
                del _watched_children[pid]
|
||||||
|
|
||||||
|
        def waitpid(pid, options):
            """
            Wait for a child process to finish.

            If the child process was spawned using
            :func:`fork_and_watch`, then this function behaves
            cooperatively. If not, it *may* have race conditions; see
            :func:`fork_gevent` for more information.

            The arguments are as for the underlying
            :func:`os.waitpid`. Some combinations of *options* may not
            be supported cooperatively (as of 1.1 that includes
            WUNTRACED). Using a *pid* of 0 to request waiting on only processes
            from the current process group is not cooperative.

            Availability: POSIX.

            .. versionadded:: 1.1b1
            .. versionchanged:: 1.2a1
               More cases are handled in a cooperative manner.
            """
            # XXX Does not handle tracing children

            # So long as libev's loop doesn't run, it's OK to add
            # child watchers. The SIGCHLD handler only feeds events
            # for the next iteration of the loop to handle. (And the
            # signal handler itself is only called from the next loop
            # iteration.)

            if pid <= 0:
                # magic functions for multiple children.
                if pid == -1:
                    # Any child. If we have one that we're watching and that finished,
                    # we will use that one. Otherwise, let the OS take care of it.
                    for k, v in _watched_children.items():
                        if isinstance(v, tuple):
                            pid = k
                            break
                if pid <= 0:
                    # We didn't have one that was ready. If there are
                    # no funky options set, and the pid was -1
                    # (meaning any process, not 0, which means process
                    # group--- libev doesn't know about process
                    # groups) then we can use a child watcher of pid 0; otherwise,
                    # pass through to the OS.
                    if pid == -1 and options == 0:
                        hub = get_hub()
                        watcher = hub.loop.child(0, False)
                        hub.wait(watcher)
                        return watcher.rpid, watcher.rstatus
                    # There were funky options/pid, so we must go to the OS.
                    return _waitpid(pid, options)

            if pid in _watched_children:
                # yes, we're watching it
                if options & _WNOHANG or isinstance(_watched_children[pid], tuple):
                    # We're either asked not to block, or it already finished, in which
                    # case blocking doesn't matter
                    result = _watched_children[pid]
                    if isinstance(result, tuple):
                        # it finished. libev child watchers
                        # are one-shot
                        del _watched_children[pid]
                        return result[:2]
                    # it's not finished
                    return (0, 0)
                else:
                    # Ok, we need to "block". Do so via a watcher so that we're
                    # cooperative. We know it's our child, etc, so this should work.
                    watcher = _watched_children[pid]
                    # We can't start a watcher that's already started,
                    # so we can't reuse the existing watcher.
                    new_watcher = watcher.loop.child(pid, False)
                    get_hub().wait(new_watcher)
                    # Ok, so now the new watcher is done. That means
                    # the old watcher's callback (_on_child) should
                    # have fired, potentially taking this child out of
                    # _watched_children (but that could depend on how
                    # many callbacks there were to run, so use the
                    # watcher object directly; libev sets all the
                    # watchers at the same time).
                    return watcher.rpid, watcher.rstatus

            # we're not watching it and it may not even be our child,
            # so we must go to the OS to be sure to get the right semantics (exception)
            return _waitpid(pid, options)
|
||||||
|
|
||||||
|
def fork_and_watch(callback=None, loop=None, ref=False, fork=fork_gevent):
    """
    Fork a child process and start a child watcher for it in the parent process.

    This call cooperates with :func:`waitpid` to enable cooperatively waiting
    for children to finish. When monkey-patching, these functions are patched in as
    :func:`os.fork` and :func:`os.waitpid`, respectively.

    In the child process, this function calls :func:`gevent.hub.reinit` before returning.

    Availability: POSIX.

    :keyword callback: If given, a callable that will be called with the child watcher
        when the child finishes.
    :keyword loop: The loop to start the watcher in. Defaults to the
        loop of the current hub.
    :keyword fork: The fork function. Defaults to :func:`the one defined in this
        module <gevent.os.fork_gevent>` (which automatically calls :func:`gevent.hub.reinit`).
        Pass the builtin :func:`os.fork` function if you do not need to
        initialize gevent in the child process.

    .. versionadded:: 1.1b1
    .. seealso::
        :func:`gevent.monkey.get_original` To access the builtin :func:`os.fork`.
    """
    pid = fork()
    if pid:
        # parent
        loop = loop or get_hub().loop
        watcher = loop.child(pid, ref=ref)
        # Register the watcher so that waitpid() can find it and so libev
        # captures the child's exit status for us.
        _watched_children[pid] = watcher
        watcher.start(_on_child, watcher, callback)
    # In the child (pid == 0), fork_gevent already called reinit.
    return pid
|
||||||
|
|
||||||
|
__extensions__.append('fork_and_watch')
__extensions__.append('fork_gevent')

if 'forkpty' in __implements__:
    def forkpty_and_watch(callback=None, loop=None, ref=False, forkpty=forkpty_gevent):
        """
        Like :func:`fork_and_watch`, except using :func:`forkpty_gevent`.

        Availability: Some Unix systems.

        .. versionadded:: 1.1b5
        """
        # forkpty() returns (pid, master_fd), but fork_and_watch expects a
        # fork callable that returns just the pid; capture the full tuple
        # in *result* so it can be returned to the caller.
        result = []

        def _fork():
            pid_and_fd = forkpty()
            result.append(pid_and_fd)
            return pid_and_fd[0]
        fork_and_watch(callback, loop, ref, _fork)
        return result[0]

    __extensions__.append('forkpty_and_watch')
|
||||||
|
|
||||||
|
# Watch children by default
if not os.getenv('GEVENT_NOWAITPID'):
    # Broken out into separate functions instead of simple name aliases
    # for documentation purposes.
    def fork(*args, **kwargs):
        """
        Forks a child process and starts a child watcher for it in the
        parent process so that ``waitpid`` and SIGCHLD work as expected.

        This implementation of ``fork`` is a wrapper for :func:`fork_and_watch`
        when the environment variable ``GEVENT_NOWAITPID`` is *not* defined.
        This is the default and should be used by most applications.

        .. versionchanged:: 1.1b2
        """
        # take any args to match fork_and_watch
        return fork_and_watch(*args, **kwargs)

    if 'forkpty' in __implements__:
        def forkpty(*args, **kwargs):
            """
            Like :func:`fork`, but using :func:`forkpty_gevent`.

            This implementation of ``forkpty`` is a wrapper for :func:`forkpty_and_watch`
            when the environment variable ``GEVENT_NOWAITPID`` is *not* defined.
            This is the default and should be used by most applications.

            .. versionadded:: 1.1b5
            """
            # take any args to match fork_and_watch
            return forkpty_and_watch(*args, **kwargs)
    __implements__.append("waitpid")
else:
    def fork():
        """
        Forks a child process, initializes gevent in the child,
        but *does not* prepare the parent to wait for the child or receive SIGCHLD.

        This implementation of ``fork`` is a wrapper for :func:`fork_gevent`
        when the environment variable ``GEVENT_NOWAITPID`` *is* defined.
        This is not recommended for most applications.
        """
        return fork_gevent()

    if 'forkpty' in __implements__:
        def forkpty():
            """
            Like :func:`fork`, but using :func:`forkpty_gevent`.

            This implementation of ``forkpty`` is a wrapper for :func:`forkpty_gevent`
            when the environment variable ``GEVENT_NOWAITPID`` *is* defined.
            This is not recommended for most applications.

            .. versionadded:: 1.1b5
            """
            return forkpty_gevent()
    # In this configuration the cooperative waitpid is only an extension,
    # not a replacement for os.waitpid.
    __extensions__.append("waitpid")
|
||||||
|
|
||||||
|
else:
|
||||||
|
__implements__.remove('fork')
|
||||||
|
|
||||||
|
# Re-export every remaining os-module name that we neither implement
# nor extend, so this module is a drop-in stand-in for `os`.
__imports__ = copy_globals(os, globals(),
                           names_to_ignore=__implements__ + __extensions__,
                           dunder_names_to_keep=())

__all__ = list(set(__implements__ + __extensions__))
|
||||||
759
python/gevent/pool.py
Normal file
759
python/gevent/pool.py
Normal file
@@ -0,0 +1,759 @@
|
|||||||
|
# Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Managing greenlets in a group.
|
||||||
|
|
||||||
|
The :class:`Group` class in this module abstracts a group of running
|
||||||
|
greenlets. When a greenlet dies, it's automatically removed from the
|
||||||
|
group. All running greenlets in a group can be waited on with
|
||||||
|
:meth:`Group.join`, or all running greenlets can be killed with
|
||||||
|
:meth:`Group.kill`.
|
||||||
|
|
||||||
|
The :class:`Pool` class, which is a subclass of :class:`Group`,
|
||||||
|
provides a way to limit concurrency: its :meth:`spawn <Pool.spawn>`
|
||||||
|
method blocks if the number of greenlets in the pool has already
|
||||||
|
reached the limit, until there is a free slot.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from bisect import insort_right
|
||||||
|
try:
|
||||||
|
from itertools import izip
|
||||||
|
except ImportError:
|
||||||
|
# Python 3
|
||||||
|
izip = zip
|
||||||
|
|
||||||
|
from gevent.hub import GreenletExit, getcurrent, kill as _kill
|
||||||
|
from gevent.greenlet import joinall, Greenlet
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
from gevent.event import Event
|
||||||
|
from gevent.lock import Semaphore, DummySemaphore
|
||||||
|
|
||||||
|
__all__ = ['Group', 'Pool']
|
||||||
|
|
||||||
|
|
||||||
|
class IMapUnordered(Greenlet):
    """
    An iterator of map results, yielded in whatever order the
    individual results finish.
    """

    _zipped = False

    def __init__(self, func, iterable, spawn=None, maxsize=None, _zipped=False):
        """
        An iterator that yields the result of ``func(item)`` for each item
        in *iterable*, spawning one greenlet per item via *spawn*.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer and the results consume a great deal of resources.
            Using a bound is more computationally expensive than not using a bound.

        .. versionchanged:: 1.1b3
            Added the *maxsize* parameter.
        """
        from gevent.queue import Queue
        Greenlet.__init__(self)
        if spawn is not None:
            self.spawn = spawn
        if _zipped:
            self._zipped = _zipped
        self.func = func
        self.iterable = iterable
        self.queue = Queue()
        if maxsize:
            # Bounding the queue is not enough if we want to keep from
            # accumulating objects; the result value will be around as
            # the greenlet's result, blocked on self.queue.put(), and
            # we'll go on to spawn another greenlet, which in turn can
            # create the result. So we need a semaphore to prevent a
            # greenlet from exiting while the queue is full so that we
            # don't spawn the next greenlet (assuming that self.spawn
            # is of course bounded). (Alternatively we could have the
            # greenlet itself do the insert into the pool, but that
            # takes some rework).
            #
            # Given the use of a semaphore at this level, sizing the queue becomes
            # redundant, and that lets us avoid having to use self.link() instead
            # of self.rawlink() to avoid having blocking methods called in the
            # hub greenlet.
            factory = Semaphore
        else:
            factory = DummySemaphore
        self._result_semaphore = factory(maxsize)

        # Number of spawned worker greenlets that have not yet reported.
        self.count = 0
        self.finished = False
        # If the queue size is unbounded, then we want to call all
        # the links (_on_finish and _on_result) directly in the hub greenlet
        # for efficiency. However, if the queue is bounded, we can't do that if
        # the queue might block (because if there's no waiter the hub can switch to,
        # the queue simply raises Full). Therefore, in that case, we use
        # the safer, somewhat-slower (because it spawns a greenlet) link() methods.
        # This means that _on_finish and _on_result can be called and interleaved in any order
        # if the call to self.queue.put() blocks..
        # Note that right now we're not bounding the queue, instead using a semaphore.
        self.rawlink(self._on_finish)

    def __iter__(self):
        return self

    def next(self):
        # Hand one slot back to the producers, then fetch (possibly
        # blocking for) the next finished value.
        self._result_semaphore.release()
        value = self._inext()
        if isinstance(value, Failure):
            raise value.exc
        return value
    __next__ = next

    def _inext(self):
        return self.queue.get()

    def _ispawn(self, func, item):
        # Blocks while `maxsize` results are already outstanding.
        self._result_semaphore.acquire()
        self.count += 1
        g = self.spawn(func, item) if not self._zipped else self.spawn(func, *item)
        g.rawlink(self._on_result)
        return g

    def _run(self): # pylint:disable=method-hidden
        try:
            func = self.func
            for item in self.iterable:
                self._ispawn(func, item)
        finally:
            # Drop references promptly so they can be collected.
            self.__dict__.pop('spawn', None)
            self.__dict__.pop('func', None)
            self.__dict__.pop('iterable', None)

    def _on_result(self, greenlet):
        # This method can either be called in the hub greenlet (if the
        # queue is unbounded) or its own greenlet. If it's called in
        # its own greenlet, the calls to put() may block and switch
        # greenlets, which in turn could mutate our state. So any
        # state on this object that we need to look at, notably
        # self.count, we need to capture or mutate *before* we put.
        # (Note that right now we're not bounding the queue, but we may
        # choose to do so in the future so this implementation will be left in case.)
        self.count -= 1
        count = self.count
        finished = self.finished
        ready = self.ready()
        put_finished = False

        if ready and count <= 0 and not finished:
            finished = self.finished = True
            put_finished = True

        if greenlet.successful():
            self.queue.put(self._iqueue_value_for_success(greenlet))
        else:
            self.queue.put(self._iqueue_value_for_failure(greenlet))

        if put_finished:
            self.queue.put(self._iqueue_value_for_finished())

    def _on_finish(self, _self):
        if self.finished:
            return

        if not self.successful():
            self.finished = True
            self.queue.put(self._iqueue_value_for_self_failure())
            return

        if self.count <= 0:
            self.finished = True
            self.queue.put(self._iqueue_value_for_finished())

    def _iqueue_value_for_success(self, greenlet):
        return greenlet.value

    def _iqueue_value_for_failure(self, greenlet):
        # getattr() with no default is plain attribute access; use the
        # direct form.
        return Failure(greenlet.exception, greenlet._raise_exception)

    def _iqueue_value_for_finished(self):
        return Failure(StopIteration)

    def _iqueue_value_for_self_failure(self):
        return Failure(self.exception, self._raise_exception)
|
||||||
|
|
||||||
|
|
||||||
|
class IMap(IMapUnordered):
    # A specialization of IMapUnordered that returns items
    # in the order in which they were generated, not
    # the order in which they finish.
    # We do this by storing tuples (order, value) in the queue
    # not just value.

    def __init__(self, *args, **kwargs):
        # Results that arrived out of order, kept sorted by index until
        # it's their turn to be yielded.
        self.waiting = [] # QQQ maybe deque will work faster there?
        # Index of the next result to yield to the consumer.
        self.index = 0
        # Largest index assigned to a spawned greenlet so far.
        self.maxindex = -1
        IMapUnordered.__init__(self, *args, **kwargs)

    def _inext(self):
        while True:
            if self.waiting and self.waiting[0][0] <= self.index:
                # The next in-order result is already buffered.
                _, value = self.waiting.pop(0)
            else:
                index, value = self.queue.get()
                if index > self.index:
                    # Arrived early: buffer it until earlier results come in.
                    insort_right(self.waiting, (index, value))
                    continue
            self.index += 1
            return value

    def _ispawn(self, func, item):
        g = IMapUnordered._ispawn(self, func, item)
        # Tag each worker with its submission order so results can be
        # re-sequenced in _inext.
        self.maxindex += 1
        g.index = self.maxindex
        return g

    def _iqueue_value_for_success(self, greenlet):
        return (greenlet.index, IMapUnordered._iqueue_value_for_success(self, greenlet))

    def _iqueue_value_for_failure(self, greenlet):
        return (greenlet.index, IMapUnordered._iqueue_value_for_failure(self, greenlet))

    def _iqueue_value_for_finished(self):
        # The finished marker must sort after every real result.
        self.maxindex += 1
        return (self.maxindex, IMapUnordered._iqueue_value_for_finished(self))

    def _iqueue_value_for_self_failure(self):
        self.maxindex += 1
        return (self.maxindex, IMapUnordered._iqueue_value_for_self_failure(self))
|
||||||
|
|
||||||
|
|
||||||
|
class GroupMappingMixin(object):
    # Internal, non-public API class.
    # Provides mixin methods for implementing mapping pools. Subclasses must define:

    # - self.spawn(func, *args, **kwargs): a function that runs `func` with `args`
    # and `kwargs`, potentially asynchronously. Return a value with a `get` method that
    # blocks until the results of func are available, and a `link` method.

    # - self._apply_immediately(): should the function passed to apply be called immediately,
    # synchronously?

    # - self._apply_async_use_greenlet(): Should apply_async directly call
    # Greenlet.spawn(), bypassing self.spawn? Return true when self.spawn would block

    # - self._apply_async_cb_spawn(callback, result): Run the given callback function, possibly
    # asynchronously, possibly synchronously.

    def apply_cb(self, func, args=None, kwds=None, callback=None):
        """
        :meth:`apply` the given *func(\\*args, \\*\\*kwds)*, and, if a *callback* is given, run it with the
        results of *func* (unless an exception was raised.)

        The *callback* may be called synchronously or asynchronously. If called
        asynchronously, it will not be tracked by this group. (:class:`Group` and :class:`Pool`
        call it asynchronously in a new greenlet; :class:`~gevent.threadpool.ThreadPool` calls
        it synchronously in the current greenlet.)
        """
        result = self.apply(func, args, kwds)
        if callback is not None:
            self._apply_async_cb_spawn(callback, result)
        return result

    def apply_async(self, func, args=None, kwds=None, callback=None):
        """
        A variant of the :meth:`apply` method which returns a :class:`~.Greenlet` object.

        When the returned greenlet gets to run, it *will* call :meth:`apply`,
        passing in *func*, *args* and *kwds*.

        If *callback* is specified, then it should be a callable which
        accepts a single argument. When the result becomes ready
        callback is applied to it (unless the call failed).

        This method will never block, even if this group is full (that is,
        even if :meth:`spawn` would block, this method will not).

        .. caution:: The returned greenlet may or may not be tracked
           as part of this group, so :meth:`joining <join>` this group is
           not a reliable way to wait for the results to be available or
           for the returned greenlet to run; instead, join the returned
           greenlet.

        .. tip:: Because :class:`~.ThreadPool` objects do not track greenlets, the returned
           greenlet will never be a part of it. To reduce overhead and improve performance,
           :class:`Group` and :class:`Pool` may choose to track the returned
           greenlet. These are implementation details that may change.
        """
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_async_use_greenlet():
            # cannot call self.spawn() directly because it will block
            # XXX: This is always the case for ThreadPool, but for Group/Pool
            # of greenlets, this is only the case when they are full...hence
            # the weasely language about "may or may not be tracked". Should we make
            # Group/Pool always return true as well so it's never tracked by any
            # implementation? That would simplify that logic, but could increase
            # the total number of greenlets in the system and add a layer of
            # overhead for the simple cases when the pool isn't full.
            return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)

        greenlet = self.spawn(func, *args, **kwds)
        if callback is not None:
            # NOTE(review): pass_value is not among this chunk's visible
            # imports; presumably defined/imported later in this module — verify.
            greenlet.link(pass_value(callback))
        return greenlet

    def apply(self, func, args=None, kwds=None):
        """
        Rough equivalent of the :func:`apply()` builtin function blocking until
        the result is ready and returning it.

        The ``func`` will *usually*, but not *always*, be run in a way
        that allows the current greenlet to switch out (for example,
        in a new greenlet or thread, depending on implementation). But
        if the current greenlet or thread is already one that was
        spawned by this pool, the pool may choose to immediately run
        the `func` synchronously.

        Any exception ``func`` raises will be propagated to the caller of ``apply`` (that is,
        this method will raise the exception that ``func`` raised).
        """
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        if self._apply_immediately():
            return func(*args, **kwds)
        return self.spawn(func, *args, **kwds).get()

    def map(self, func, iterable):
        """Return a list made by applying the *func* to each element of
        the iterable.

        .. seealso:: :meth:`imap`
        """
        return list(self.imap(func, iterable))

    def map_cb(self, func, iterable, callback=None):
        # Like map(), but additionally invokes *callback* (synchronously)
        # with the full result list.
        result = self.map(func, iterable)
        if callback is not None:
            callback(result)
        return result

    def map_async(self, func, iterable, callback=None):
        """
        A variant of the map() method which returns a Greenlet object that is executing
        the map function.

        If callback is specified then it should be a callable which accepts a
        single argument.
        """
        return Greenlet.spawn(self.map_cb, func, iterable, callback)

    def __imap(self, cls, func, *iterables, **kwargs):
        # Python 2 doesn't support the syntax that lets us mix varargs and
        # a named kwarg, so we have to unpack manually
        maxsize = kwargs.pop('maxsize', None)
        if kwargs:
            raise TypeError("Unsupported keyword arguments")
        return cls.spawn(func, izip(*iterables), spawn=self.spawn,
                         _zipped=True, maxsize=maxsize)

    def imap(self, func, *iterables, **kwargs):
        """
        imap(func, *iterables, maxsize=None) -> iterable

        An equivalent of :func:`itertools.imap`, operating in parallel.
        The *func* is applied to each element yielded from each
        iterable in *iterables* in turn, collecting the result.

        If this object has a bound on the number of active greenlets it can
        contain (such as :class:`Pool`), then at most that number of tasks will operate
        in parallel.

        :keyword int maxsize: If given and not-None, specifies the maximum number of
            finished results that will be allowed to accumulate awaiting the reader;
            more than that number of results will cause map function greenlets to begin
            to block. This is most useful if there is a great disparity in the speed of
            the mapping code and the consumer and the results consume a great deal of resources.

            .. note:: This is separate from any bound on the number of active parallel
               tasks, though they may have some interaction (for example, limiting the
               number of parallel tasks to the smallest bound).

            .. note:: Using a bound is slightly more computationally expensive than not using a bound.

            .. tip:: The :meth:`imap_unordered` method makes much better
               use of this parameter. Some additional, unspecified,
               number of objects may be required to be kept in memory
               to maintain order by this function.

        :return: An iterable object.

        .. versionchanged:: 1.1b3
            Added the *maxsize* keyword parameter.
        .. versionchanged:: 1.1a1
            Accept multiple *iterables* to iterate in parallel.
        """
        return self.__imap(IMap, func, *iterables, **kwargs)

    def imap_unordered(self, func, *iterables, **kwargs):
        """
        imap_unordered(func, *iterables, maxsize=None) -> iterable

        The same as :meth:`imap` except that the ordering of the results
        from the returned iterator should be considered in arbitrary
        order.

        This is lighter weight than :meth:`imap` and should be preferred if order
        doesn't matter.

        .. seealso:: :meth:`imap` for more details.
        """
        return self.__imap(IMapUnordered, func, *iterables, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class Group(GroupMappingMixin):
|
||||||
|
"""
|
||||||
|
Maintain a group of greenlets that are still running, without
|
||||||
|
limiting their number.
|
||||||
|
|
||||||
|
Links to each item and removes it upon notification.
|
||||||
|
|
||||||
|
Groups can be iterated to discover what greenlets they are tracking,
|
||||||
|
they can be tested to see if they contain a greenlet, and they know the
|
||||||
|
number (len) of greenlets they are tracking. If they are not tracking any
|
||||||
|
greenlets, they are False in a boolean context.
|
||||||
|
"""
|
||||||
|
|
||||||
|
#: The type of Greenlet object we will :meth:`spawn`. This can be changed
|
||||||
|
#: on an instance or in a subclass.
|
||||||
|
greenlet_class = Greenlet
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
assert len(args) <= 1, args
|
||||||
|
self.greenlets = set(*args)
|
||||||
|
if args:
|
||||||
|
for greenlet in args[0]:
|
||||||
|
greenlet.rawlink(self._discard)
|
||||||
|
# each item we kill we place in dying, to avoid killing the same greenlet twice
|
||||||
|
self.dying = set()
|
||||||
|
self._empty_event = Event()
|
||||||
|
self._empty_event.set()
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), self.greenlets)
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
"""
|
||||||
|
Answer how many greenlets we are tracking. Note that if we are empty,
|
||||||
|
we are False in a boolean context.
|
||||||
|
"""
|
||||||
|
return len(self.greenlets)
|
||||||
|
|
||||||
|
def __contains__(self, item):
|
||||||
|
"""
|
||||||
|
Answer if we are tracking the given greenlet.
|
||||||
|
"""
|
||||||
|
return item in self.greenlets
|
||||||
|
|
||||||
|
def __iter__(self):
|
||||||
|
"""
|
||||||
|
Iterate across all the greenlets we are tracking, in no particular order.
|
||||||
|
"""
|
||||||
|
return iter(self.greenlets)
|
||||||
|
|
||||||
|
def add(self, greenlet):
|
||||||
|
"""
|
||||||
|
Begin tracking the greenlet.
|
||||||
|
|
||||||
|
If this group is :meth:`full`, then this method may block
|
||||||
|
until it is possible to track the greenlet.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
rawlink = greenlet.rawlink
|
||||||
|
except AttributeError:
|
||||||
|
pass # non-Greenlet greenlet, like MAIN
|
||||||
|
else:
|
||||||
|
rawlink(self._discard)
|
||||||
|
self.greenlets.add(greenlet)
|
||||||
|
self._empty_event.clear()
|
||||||
|
|
||||||
|
def _discard(self, greenlet):
|
||||||
|
self.greenlets.discard(greenlet)
|
||||||
|
self.dying.discard(greenlet)
|
||||||
|
if not self.greenlets:
|
||||||
|
self._empty_event.set()
|
||||||
|
|
||||||
|
def discard(self, greenlet):
|
||||||
|
"""
|
||||||
|
Stop tracking the greenlet.
|
||||||
|
"""
|
||||||
|
self._discard(greenlet)
|
||||||
|
try:
|
||||||
|
unlink = greenlet.unlink
|
||||||
|
except AttributeError:
|
||||||
|
pass # non-Greenlet greenlet, like MAIN
|
||||||
|
else:
|
||||||
|
unlink(self._discard)
|
||||||
|
|
||||||
|
def start(self, greenlet):
|
||||||
|
"""
|
||||||
|
Start the un-started *greenlet* and add it to the collection of greenlets
|
||||||
|
this group is monitoring.
|
||||||
|
"""
|
||||||
|
self.add(greenlet)
|
||||||
|
greenlet.start()
|
||||||
|
|
||||||
|
def spawn(self, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Begin a new greenlet with the given arguments (which are passed
|
||||||
|
to the greenlet constructor) and add it to the collection of greenlets
|
||||||
|
this group is monitoring.
|
||||||
|
|
||||||
|
:return: The newly started greenlet.
|
||||||
|
"""
|
||||||
|
greenlet = self.greenlet_class(*args, **kwargs)
|
||||||
|
self.start(greenlet)
|
||||||
|
return greenlet
|
||||||
|
|
||||||
|
# def close(self):
|
||||||
|
# """Prevents any more tasks from being submitted to the pool"""
|
||||||
|
# self.add = RaiseException("This %s has been closed" % self.__class__.__name__)
|
||||||
|
|
||||||
|
def join(self, timeout=None, raise_error=False):
|
||||||
|
"""
|
||||||
|
Wait for this group to become empty *at least once*.
|
||||||
|
|
||||||
|
If there are no greenlets in the group, returns immediately.
|
||||||
|
|
||||||
|
.. note:: By the time the waiting code (the caller of this
|
||||||
|
method) regains control, a greenlet may have been added to
|
||||||
|
this group, and so this object may no longer be empty. (That
|
||||||
|
is, ``group.join(); assert len(group) == 0`` is not
|
||||||
|
guaranteed to hold.) This method only guarantees that the group
|
||||||
|
reached a ``len`` of 0 at some point.
|
||||||
|
|
||||||
|
:keyword bool raise_error: If True (*not* the default), if any
|
||||||
|
greenlet that finished while the join was in progress raised
|
||||||
|
an exception, that exception will be raised to the caller of
|
||||||
|
this method. If multiple greenlets raised exceptions, which
|
||||||
|
one gets re-raised is not determined. Only greenlets currently
|
||||||
|
in the group when this method is called are guaranteed to
|
||||||
|
be checked for exceptions.
|
||||||
|
|
||||||
|
:return bool: A value indicating whether this group became empty.
|
||||||
|
If the timeout is specified and the group did not become empty
|
||||||
|
during that timeout, then this will be a false value. Otherwise
|
||||||
|
it will be a true value.
|
||||||
|
|
||||||
|
.. versionchanged:: 1.2a1
|
||||||
|
Add the return value.
|
||||||
|
"""
|
||||||
|
greenlets = list(self.greenlets) if raise_error else ()
|
||||||
|
result = self._empty_event.wait(timeout=timeout)
|
||||||
|
|
||||||
|
for greenlet in greenlets:
|
||||||
|
if greenlet.exception is not None:
|
||||||
|
if hasattr(greenlet, '_raise_exception'):
|
||||||
|
greenlet._raise_exception()
|
||||||
|
raise greenlet.exception
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
def kill(self, exception=GreenletExit, block=True, timeout=None):
|
||||||
|
"""
|
||||||
|
Kill all greenlets being tracked by this group.
|
||||||
|
"""
|
||||||
|
timer = Timeout._start_new_or_dummy(timeout)
|
||||||
|
try:
|
||||||
|
while self.greenlets:
|
||||||
|
for greenlet in list(self.greenlets):
|
||||||
|
if greenlet in self.dying:
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
kill = greenlet.kill
|
||||||
|
except AttributeError:
|
||||||
|
_kill(greenlet, exception)
|
||||||
|
else:
|
||||||
|
kill(exception, block=False)
|
||||||
|
self.dying.add(greenlet)
|
||||||
|
if not block:
|
||||||
|
break
|
||||||
|
joinall(self.greenlets)
|
||||||
|
except Timeout as ex:
|
||||||
|
if ex is not timer:
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
timer.cancel()
|
||||||
|
|
||||||
|
def killone(self, greenlet, exception=GreenletExit, block=True, timeout=None):
|
||||||
|
"""
|
||||||
|
If the given *greenlet* is running and being tracked by this group,
|
||||||
|
kill it.
|
||||||
|
"""
|
||||||
|
if greenlet not in self.dying and greenlet in self.greenlets:
|
||||||
|
greenlet.kill(exception, block=False)
|
||||||
|
self.dying.add(greenlet)
|
||||||
|
if block:
|
||||||
|
greenlet.join(timeout)
|
||||||
|
|
||||||
|
def full(self):
|
||||||
|
"""
|
||||||
|
Return a value indicating whether this group can track more greenlets.
|
||||||
|
|
||||||
|
In this implementation, because there are no limits on the number of
|
||||||
|
tracked greenlets, this will always return a ``False`` value.
|
||||||
|
"""
|
||||||
|
return False
|
||||||
|
|
||||||
|
def wait_available(self, timeout=None):
|
||||||
|
"""
|
||||||
|
Block until it is possible to :meth:`spawn` a new greenlet.
|
||||||
|
|
||||||
|
In this implementation, because there are no limits on the number
|
||||||
|
of tracked greenlets, this will always return immediately.
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
# MappingMixin methods
|
||||||
|
|
||||||
|
def _apply_immediately(self):
|
||||||
|
# If apply() is called from one of our own
|
||||||
|
# worker greenlets, don't spawn a new one---if we're full, that
|
||||||
|
# could deadlock.
|
||||||
|
return getcurrent() in self
|
||||||
|
|
||||||
|
def _apply_async_cb_spawn(self, callback, result):
|
||||||
|
Greenlet.spawn(callback, result)
|
||||||
|
|
||||||
|
def _apply_async_use_greenlet(self):
    # cannot call self.spawn() because it will block, so
    # use a fresh, untracked greenlet that when run will
    # (indirectly) call self.spawn() for us.
    # Only necessary when the group is currently at capacity.
    return self.full()
|
||||||
|
|
||||||
|
|
||||||
|
class Failure(object):
    """
    Carrier for an exception produced elsewhere.

    Stores the exception instance and, optionally, a zero-argument
    callable that re-raises it when invoked.
    """

    __slots__ = ['exc', '_raise_exception']

    def __init__(self, exc, raise_exception=None):
        self.exc = exc
        self._raise_exception = raise_exception

    def raise_exc(self):
        """Re-raise the stored exception, preferring the custom raiser."""
        raiser = self._raise_exception
        if raiser:
            raiser()
        else:
            raise self.exc
|
||||||
|
|
||||||
|
|
||||||
|
class Pool(Group):

    def __init__(self, size=None, greenlet_class=None):
        """
        Create a new pool.

        A pool behaves like a group, except that the maximum number of
        concurrently tracked members is capped by *size*.

        :keyword int size: If given, this non-negative integer is the
            maximum count of active greenlets allowed in this pool.
            A few values have special significance:

            * ``None`` (the default) places no limit on the number of
              greenlets. This is useful when you need to track, but not
              limit, greenlets; a plain :class:`Group` may be a more
              efficient way to achieve the same effect.
            * ``0`` creates a pool that can never have any active
              greenlets; attempting to spawn in it blocks forever. This
              is only useful together with :meth:`wait_available` (with
              a timeout) and :meth:`free_count`.
        :keyword greenlet_class: If given, overrides the class used for
            newly spawned greenlets.
        :raises ValueError: If *size* is negative.
        """
        if size is not None and size < 0:
            raise ValueError('size must not be negative: %r' % (size, ))
        Group.__init__(self)
        self.size = size
        if greenlet_class is not None:
            self.greenlet_class = greenlet_class
        # An unbounded pool is backed by a semaphore that never blocks.
        factory = DummySemaphore if size is None else Semaphore
        self._semaphore = factory(size)

    def wait_available(self, timeout=None):
        """
        Wait until it's possible to spawn a greenlet in this pool.

        :param float timeout: If given, only wait the specified number
            of seconds.

        .. warning:: If the pool was initialized with a size of 0, this
           method will block forever unless a timeout is given.

        :return: A number indicating how many new greenlets can be put
            into the pool without blocking.

        .. versionchanged:: 1.1a3
           Added the ``timeout`` parameter.
        """
        return self._semaphore.wait(timeout=timeout)

    def full(self):
        """
        Return a boolean indicating whether this pool is out of room
        for new members (``True`` when no free slot remains).
        """
        return self.free_count() <= 0

    def free_count(self):
        """
        Return a number indicating *approximately* how many more members
        can be added to this pool.
        """
        # Unbounded pools always report room for at least one more.
        return 1 if self.size is None else max(0, self.size - len(self))

    def add(self, greenlet):
        """
        Begin tracking the given greenlet, blocking until space is available.

        .. seealso:: :meth:`Group.add`
        """
        self._semaphore.acquire()
        try:
            Group.add(self, greenlet)
        except:
            # Tracking failed: return the slot before propagating.
            self._semaphore.release()
            raise

    def _discard(self, greenlet):
        # Stop tracking and free the slot the greenlet occupied.
        Group._discard(self, greenlet)
        self._semaphore.release()
|
||||||
|
|
||||||
|
|
||||||
|
class pass_value(object):
    """
    Adapt a callback expecting a plain value for use as a link callback
    that receives a greenlet-like *source*.

    Calling the wrapper invokes the wrapped callback with
    ``source.value``, but only when ``source.successful()`` is true.
    Hashing, equality, string conversion and attribute access are all
    forwarded to the wrapped callback.
    """

    __slots__ = ['callback']

    def __init__(self, callback):
        self.callback = callback

    def __call__(self, source):
        # Only propagate values from sources that finished successfully.
        if not source.successful():
            return
        self.callback(source.value)

    def __hash__(self):
        return hash(self.callback)

    def __eq__(self, other):
        # Compare against another wrapper's callback, or a bare callable.
        return self.callback == getattr(other, 'callback', other)

    def __str__(self):
        return str(self.callback)

    def __repr__(self):
        return repr(self.callback)

    def __getattr__(self, item):
        # 'callback' is in __slots__, so a miss here means the attribute
        # must come from the wrapped callable itself.
        assert item != 'callback'
        return getattr(self.callback, item)
|
||||||
17
python/gevent/python.pxd
Normal file
17
python/gevent/python.pxd
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Declarations for the parts of the CPython C API used by this package.

cdef extern from "Python.h":
    # Opaque forward declaration; only pointers to PyObject are used.
    struct PyObject:
        pass
    ctypedef PyObject* PyObjectPtr "PyObject*"
    # Reference-count management.
    void Py_INCREF(PyObjectPtr)
    void Py_DECREF(PyObjectPtr)
    void Py_XDECREF(PyObjectPtr)
    # Recursion guards used by CPython when computing container repr()s.
    int Py_ReprEnter(PyObjectPtr)
    void Py_ReprLeave(PyObjectPtr)
    int PyCallable_Check(PyObjectPtr)

cdef extern from "frameobject.h":
    # Subset of the interpreter's per-thread state: the exception slots.
    # NOTE(review): layout assumed to match the target CPython version --
    # these fields moved in later CPython releases; confirm.
    ctypedef struct PyThreadState:
        PyObjectPtr exc_type
        PyObjectPtr exc_value
        PyObjectPtr exc_traceback
    PyThreadState* PyThreadState_GET()
|
||||||
1509
python/gevent/pywsgi.py
Normal file
1509
python/gevent/pywsgi.py
Normal file
File diff suppressed because it is too large
Load Diff
605
python/gevent/queue.py
Normal file
605
python/gevent/queue.py
Normal file
@@ -0,0 +1,605 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""Synchronized queues.
|
||||||
|
|
||||||
|
The :mod:`gevent.queue` module implements multi-producer, multi-consumer queues
|
||||||
|
that work across greenlets, with the API similar to the classes found in the
|
||||||
|
standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>` modules.
|
||||||
|
|
||||||
|
The classes in this module implement iterator protocol. Iterating over queue
|
||||||
|
means repeatedly calling :meth:`get <Queue.get>` until :meth:`get <Queue.get>` returns ``StopIteration``.
|
||||||
|
|
||||||
|
>>> queue = gevent.queue.Queue()
|
||||||
|
>>> queue.put(1)
|
||||||
|
>>> queue.put(2)
|
||||||
|
>>> queue.put(StopIteration)
|
||||||
|
>>> for item in queue:
|
||||||
|
... print(item)
|
||||||
|
1
|
||||||
|
2
|
||||||
|
|
||||||
|
.. versionchanged:: 1.0
|
||||||
|
``Queue(0)`` now means queue of infinite size, not a channel. A :exc:`DeprecationWarning`
|
||||||
|
will be issued with this argument.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import sys
|
||||||
|
import heapq
|
||||||
|
import collections
|
||||||
|
|
||||||
|
if sys.version_info[0] == 2:
|
||||||
|
import Queue as __queue__
|
||||||
|
else:
|
||||||
|
import queue as __queue__ # python 2: pylint:disable=import-error
|
||||||
|
Full = __queue__.Full
|
||||||
|
Empty = __queue__.Empty
|
||||||
|
|
||||||
|
from gevent.timeout import Timeout
|
||||||
|
from gevent.hub import get_hub, Waiter, getcurrent
|
||||||
|
from gevent.hub import InvalidSwitchError
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'JoinableQueue', 'Channel']
|
||||||
|
|
||||||
|
|
||||||
|
def _safe_remove(deq, item):
    """Remove *item* from the deque *deq*, ignoring the case where it
    is already gone."""
    # For when the item may have been removed by
    # Queue._unlock
    try:
        deq.remove(item)
    except ValueError:
        pass
|
||||||
|
|
||||||
|
|
||||||
|
class Queue(object):
    """
    Create a queue object with a given maximum size.

    If *maxsize* is less than or equal to zero or ``None``, the queue
    size is infinite.

    .. versionchanged:: 1.1b3
       Queues now support :func:`len`; it behaves the same as :meth:`qsize`.
    .. versionchanged:: 1.1b3
       Multiple greenlets that block on a call to :meth:`put` for a full queue
       will now be woken up to put their items into the queue in the order in which
       they arrived. Likewise, multiple greenlets that block on a call to :meth:`get` for
       an empty queue will now receive items in the order in which they blocked. An
       implementation quirk under CPython *usually* ensured this was roughly the case
       previously anyway, but that wasn't the case for PyPy.
    """

    def __init__(self, maxsize=None, items=None):
        if maxsize is not None and maxsize <= 0:
            # Non-positive sizes all mean "unbounded"; 0 is additionally
            # deprecated because it used to mean a channel.
            self.maxsize = None
            if maxsize == 0:
                import warnings
                warnings.warn('Queue(0) now equivalent to Queue(None); if you want a channel, use Channel',
                              DeprecationWarning, stacklevel=2)
        else:
            self.maxsize = maxsize
        # Explicitly maintain order for getters and putters that block
        # so that callers can consistently rely on getting things out
        # in the apparent order they went in. This was once required by
        # imap_unordered. Previously these were set() objects, and the
        # items put in the set have default hash() and eq() methods;
        # under CPython, since new objects tend to have increasing
        # hash values, this tended to roughly maintain order anyway,
        # but that's not true under PyPy. An alternative to a deque
        # (to avoid the linear scan of remove()) might be an
        # OrderedDict, but it's 2.7 only; we don't expect to have so
        # many waiters that removing an arbitrary element is a
        # bottleneck, though.
        self.getters = collections.deque()
        self.putters = collections.deque()
        self.hub = get_hub()
        self._event_unlock = None
        if items:
            self._init(maxsize, items)
        else:
            self._init(maxsize)

    # QQQ make maxsize into a property with setter that schedules unlock if necessary

    def copy(self):
        """Return a new queue of the same type with the same maxsize and items."""
        return type(self)(self.maxsize, self.queue)

    def _init(self, maxsize, items=None):
        # Create the underlying storage; subclasses override this to
        # choose a different container.
        # FIXME: Why is maxsize unused or even passed?
        # pylint:disable=unused-argument
        if items:
            self.queue = collections.deque(items)
        else:
            self.queue = collections.deque()

    def _get(self):
        # Storage primitive: remove and return the next item (FIFO).
        return self.queue.popleft()

    def _peek(self):
        # Storage primitive: return the next item without removing it.
        return self.queue[0]

    def _put(self, item):
        # Storage primitive: append an item.
        self.queue.append(item)

    def __repr__(self):
        return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s%s>' % (type(self).__name__, self._format())

    def _format(self):
        # Build the human-readable suffix shown by repr()/str().
        result = []
        if self.maxsize is not None:
            result.append('maxsize=%r' % (self.maxsize, ))
        if getattr(self, 'queue', None):
            result.append('queue=%r' % (self.queue, ))
        if self.getters:
            result.append('getters[%s]' % len(self.getters))
        if self.putters:
            result.append('putters[%s]' % len(self.putters))
        if result:
            return ' ' + ' '.join(result)
        return ''

    def qsize(self):
        """Return the size of the queue."""
        return len(self.queue)

    def __len__(self):
        """
        Return the size of the queue. This is the same as :meth:`qsize`.

        .. versionadded: 1.1b3

            Previously, getting len() of a queue would raise a TypeError.
        """

        return self.qsize()

    def __bool__(self):
        """
        A queue object is always True.

        .. versionadded: 1.1b3

           Now that queues support len(), they need to implement ``__bool__``
           to return True for backwards compatibility.
        """
        return True
    __nonzero__ = __bool__

    def empty(self):
        """Return ``True`` if the queue is empty, ``False`` otherwise."""
        return not self.qsize()

    def full(self):
        """Return ``True`` if the queue is full, ``False`` otherwise.

        ``Queue(None)`` is never full.
        """
        return self.maxsize is not None and self.qsize() >= self.maxsize

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional arg *block* is true and *timeout* is ``None`` (the default),
        block if necessary until a free slot is available. If *timeout* is
        a positive number, it blocks at most *timeout* seconds and raises
        the :class:`Full` exception if no free slot was available within that time.
        Otherwise (*block* is false), put an item on the queue if a free slot
        is immediately available, else raise the :class:`Full` exception (*timeout*
        is ignored in that case).
        """
        if self.maxsize is None or self.qsize() < self.maxsize:
            # there's a free slot, put an item right away
            self._put(item)
            if self.getters:
                self._schedule_unlock()
        elif self.hub is getcurrent():
            # We're in the mainloop, so we cannot wait; we can switch to other greenlets though.
            # Check if possible to get a free slot in the queue.
            while self.getters and self.qsize() and self.qsize() >= self.maxsize:
                getter = self.getters.popleft()
                getter.switch(getter)
            if self.qsize() < self.maxsize:
                self._put(item)
                return
            raise Full
        elif block:
            # Block behind an ItemWaiter that carries the pending item;
            # _unlock will deliver it when a slot opens.
            waiter = ItemWaiter(item, self)
            self.putters.append(waiter)
            timeout = Timeout._start_new_or_dummy(timeout, Full)
            try:
                if self.getters:
                    self._schedule_unlock()
                result = waiter.get()
                if result is not waiter:
                    raise InvalidSwitchError("Invalid switch into Queue.put: %r" % (result, ))
            finally:
                timeout.cancel()
                _safe_remove(self.putters, waiter)
        else:
            raise Full

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the :class:`Full` exception.
        """
        self.put(item, False)

    def __get_or_peek(self, method, block, timeout):
        # Internal helper method. The `method` should be either
        # self._get when called from self.get() or self._peek when
        # called from self.peek(). Call this after the initial check
        # to see if there are items in the queue.

        if self.hub is getcurrent():
            # special case to make get_nowait() or peek_nowait() runnable in the mainloop greenlet
            # there are no items in the queue; try to fix the situation by unlocking putters
            while self.putters:
                # Note: get() used popleft(), peek used pop(); popleft
                # is almost certainly correct.
                self.putters.popleft().put_and_switch()
                if self.qsize():
                    return method()
            raise Empty()

        if not block:
            # We can't block, we're not the hub, and we have nothing
            # to return. No choice...
            raise Empty()

        waiter = Waiter()
        timeout = Timeout._start_new_or_dummy(timeout, Empty)
        try:
            self.getters.append(waiter)
            if self.putters:
                self._schedule_unlock()
            result = waiter.get()
            if result is not waiter:
                raise InvalidSwitchError('Invalid switch into Queue.get: %r' % (result, ))
            return method()
        finally:
            timeout.cancel()
            _safe_remove(self.getters, waiter)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args *block* is true and *timeout* is ``None`` (the default),
        block if necessary until an item is available. If *timeout* is a positive number,
        it blocks at most *timeout* seconds and raises the :class:`Empty` exception
        if no item was available within that time. Otherwise (*block* is false), return
        an item if one is immediately available, else raise the :class:`Empty` exception
        (*timeout* is ignored in that case).
        """
        if self.qsize():
            if self.putters:
                # Taking an item frees a slot; wake a blocked putter.
                self._schedule_unlock()
            return self._get()

        return self.__get_or_peek(self._get, block, timeout)

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the :class:`Empty` exception.
        """
        return self.get(False)

    def peek(self, block=True, timeout=None):
        """Return an item from the queue without removing it.

        If optional args *block* is true and *timeout* is ``None`` (the default),
        block if necessary until an item is available. If *timeout* is a positive number,
        it blocks at most *timeout* seconds and raises the :class:`Empty` exception
        if no item was available within that time. Otherwise (*block* is false), return
        an item if one is immediately available, else raise the :class:`Empty` exception
        (*timeout* is ignored in that case).
        """
        if self.qsize():
            # XXX: Why doesn't this schedule an unlock like get() does?
            return self._peek()

        return self.__get_or_peek(self._peek, block, timeout)

    def peek_nowait(self):
        """Return an item from the queue without blocking.

        Only return an item if one is immediately available. Otherwise
        raise the :class:`Empty` exception.
        """
        return self.peek(False)

    def _unlock(self):
        # Runs as a hub callback: repeatedly hand free slots to blocked
        # putters and available items to blocked getters until neither
        # side can make progress.
        while True:
            repeat = False
            if self.putters and (self.maxsize is None or self.qsize() < self.maxsize):
                repeat = True
                try:
                    putter = self.putters.popleft()
                    self._put(putter.item)
                except: # pylint:disable=bare-except
                    putter.throw(*sys.exc_info())
                else:
                    putter.switch(putter)
            if self.getters and self.qsize():
                repeat = True
                getter = self.getters.popleft()
                getter.switch(getter)
            if not repeat:
                return

    def _schedule_unlock(self):
        # Arrange for _unlock to run once in the hub; the guard keeps at
        # most one pending callback outstanding.
        if not self._event_unlock:
            self._event_unlock = self.hub.loop.run_callback(self._unlock)

    def __iter__(self):
        return self

    def next(self):
        # Iteration ends when a StopIteration instance is dequeued.
        result = self.get()
        if result is StopIteration:
            raise result
        return result

    __next__ = next
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class ItemWaiter(Waiter):
    # A Waiter that also carries the item a blocked Queue.put() wants
    # to deliver.
    __slots__ = ['item', 'queue']

    def __init__(self, item, queue):
        Waiter.__init__(self)
        self.item = item    # the pending item
        self.queue = queue  # the queue the item is destined for

    def put_and_switch(self):
        # Deliver the pending item, drop the references, and wake the
        # greenlet blocked in Queue.put().
        self.queue._put(self.item)
        self.queue = None
        self.item = None
        return self.switch(self)
|
||||||
|
|
||||||
|
|
||||||
|
class PriorityQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).

    Entries are typically tuples of the form: ``(priority number, data)``.

    .. versionchanged:: 1.2a1
       Any *items* given to the constructor will now be passed through
       :func:`heapq.heapify` to ensure the invariants of this class hold.
       Previously it was just assumed that they were already a heap.
    '''

    def _init(self, maxsize, items=None):
        # Storage is a heapified list instead of a deque.
        if items:
            self.queue = list(items)
            heapq.heapify(self.queue)
        else:
            self.queue = []

    def _put(self, item, heappush=heapq.heappush):
        # heappush bound as a default argument: a definition-time binding
        # that avoids the attribute lookup per call.
        # pylint:disable=arguments-differ
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        # pylint:disable=arguments-differ
        return heappop(self.queue)
|
||||||
|
|
||||||
|
|
||||||
|
class LifoQueue(Queue):
    '''A subclass of :class:`Queue` that retrieves most recently added entries first.'''

    def _init(self, maxsize, items=None):
        # Storage is a plain list used as a stack.
        if items:
            self.queue = list(items)
        else:
            self.queue = []

    def _put(self, item):
        self.queue.append(item)

    def _get(self):
        # Pop from the end: LIFO order.
        return self.queue.pop()

    def _peek(self):
        return self.queue[-1]
|
||||||
|
|
||||||
|
|
||||||
|
class JoinableQueue(Queue):
    """
    A subclass of :class:`Queue` that additionally has
    :meth:`task_done` and :meth:`join` methods.
    """

    def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
        """

        .. versionchanged:: 1.1a1
           If *unfinished_tasks* is not given, then all the given *items*
           (if any) will be considered unfinished.

        """
        # Deferred import; presumably avoids an import cycle at module
        # load time -- confirm.
        from gevent.event import Event
        Queue.__init__(self, maxsize, items)
        # _cond is set exactly when there are no unfinished tasks, so
        # join() can simply wait on it.
        self._cond = Event()
        self._cond.set()

        if unfinished_tasks:
            self.unfinished_tasks = unfinished_tasks
        elif items:
            self.unfinished_tasks = len(items)
        else:
            self.unfinished_tasks = 0

        if self.unfinished_tasks:
            self._cond.clear()

    def copy(self):
        """Return a new queue with the same maxsize, items and unfinished count."""
        return type(self)(self.maxsize, self.queue, self.unfinished_tasks)

    def _format(self):
        # Extend the base repr suffix with task-tracking state.
        result = Queue._format(self)
        if self.unfinished_tasks:
            result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
        return result

    def _put(self, item):
        # Every enqueued item counts as one unfinished task.
        Queue._put(self, item)
        self.unfinished_tasks += 1
        self._cond.clear()

    def task_done(self):
        '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
        For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to :meth:`task_done` tells the queue
        that the processing on the task is complete.

        If a :meth:`join` is currently blocking, it will resume when all items have been processed
        (meaning that a :meth:`task_done` call was received for every item that had been
        :meth:`put <Queue.put>` into the queue).

        Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
        '''
        if self.unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self.unfinished_tasks -= 1
        if self.unfinished_tasks == 0:
            self._cond.set()

    def join(self, timeout=None):
        '''
        Block until all items in the queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the queue.
        The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
        that the item was retrieved and all work on it is complete. When the count of
        unfinished tasks drops to zero, :meth:`join` unblocks.

        :param float timeout: If not ``None``, then wait no more than this time in seconds
            for all tasks to finish.
        :return: ``True`` if all tasks have finished; if ``timeout`` was given and expired before
            all tasks finished, ``False``.

        .. versionchanged:: 1.1a1
           Add the *timeout* parameter.
        '''
        return self._cond.wait(timeout=timeout)
|
||||||
|
|
||||||
|
|
||||||
|
class Channel(object):
    """
    An unbuffered rendezvous point: every put() pairs with exactly one
    get(). The channel itself stores no items (qsize() is always 0).
    """

    def __init__(self):
        self.getters = collections.deque()
        self.putters = collections.deque()
        self.hub = get_hub()
        self._event_unlock = None

    def __repr__(self):
        return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        # Human-readable waiter counts for repr()/str().
        result = ''
        if self.getters:
            result += ' getters[%s]' % len(self.getters)
        if self.putters:
            result += ' putters[%s]' % len(self.putters)
        return result

    @property
    def balance(self):
        # Positive when more putters than getters are waiting.
        return len(self.putters) - len(self.getters)

    def qsize(self):
        # A channel never stores items.
        return 0

    def empty(self):
        return True

    def full(self):
        return True

    def put(self, item, block=True, timeout=None):
        if self.hub is getcurrent():
            # In the hub we cannot block: deliver directly to a waiting
            # getter, or fail immediately.
            if self.getters:
                getter = self.getters.popleft()
                getter.switch(item)
                return
            raise Full

        if not block:
            timeout = 0

        waiter = Waiter()
        # The pending entry pairs the item with the waiter to wake.
        item = (item, waiter)
        self.putters.append(item)
        timeout = Timeout._start_new_or_dummy(timeout, Full)
        try:
            if self.getters:
                self._schedule_unlock()
            result = waiter.get()
            if result is not waiter:
                raise InvalidSwitchError("Invalid switch into Channel.put: %r" % (result, ))
        except:
            _safe_remove(self.putters, item)
            raise
        finally:
            timeout.cancel()

    def put_nowait(self, item):
        self.put(item, False)

    def get(self, block=True, timeout=None):
        if self.hub is getcurrent():
            # In the hub, take directly from a waiting putter if any;
            # the putter is woken via a scheduled callback.
            # NOTE(review): when no putter is waiting, execution falls
            # through to the blocking path even in the hub -- confirm
            # that this is intended.
            if self.putters:
                item, putter = self.putters.popleft()
                self.hub.loop.run_callback(putter.switch, putter)
                return item

        if not block:
            timeout = 0

        waiter = Waiter()
        timeout = Timeout._start_new_or_dummy(timeout, Empty)
        try:
            self.getters.append(waiter)
            if self.putters:
                self._schedule_unlock()
            return waiter.get()
        except:
            self.getters.remove(waiter)
            raise
        finally:
            timeout.cancel()

    def get_nowait(self):
        return self.get(False)

    def _unlock(self):
        # Runs as a hub callback: pair up waiting putters and getters.
        while self.putters and self.getters:
            getter = self.getters.popleft()
            item, putter = self.putters.popleft()
            getter.switch(item)
            putter.switch(putter)

    def _schedule_unlock(self):
        # At most one pending _unlock callback at a time.
        if not self._event_unlock:
            self._event_unlock = self.hub.loop.run_callback(self._unlock)

    def __iter__(self):
        return self

    def next(self):
        # Iteration ends when a StopIteration instance is received.
        result = self.get()
        if result is StopIteration:
            raise result
        return result

    __next__ = next # py3
|
||||||
388
python/gevent/resolver_ares.py
Normal file
388
python/gevent/resolver_ares.py
Normal file
@@ -0,0 +1,388 @@
|
|||||||
|
# Copyright (c) 2011-2015 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
c-ares based hostname resolver.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from _socket import getservbyname, getaddrinfo, gaierror, error
|
||||||
|
from gevent.hub import Waiter, get_hub
|
||||||
|
from gevent._compat import string_types, text_type, integer_types, reraise, PY3
|
||||||
|
from gevent.socket import AF_UNSPEC, AF_INET, AF_INET6, SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, AI_NUMERICHOST, EAI_SERVICE, AI_PASSIVE
|
||||||
|
from gevent.ares import channel, InvalidIP # pylint:disable=import-error,no-name-in-module
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['Resolver']
|
||||||
|
|
||||||
|
|
||||||
|
class Resolver(object):
|
||||||
|
"""
|
||||||
|
Implementation of the resolver API using the `c-ares`_ library.
|
||||||
|
|
||||||
|
This implementation uses the c-ares library to handle name
|
||||||
|
resolution. c-ares is natively asynchronous at the socket level
|
||||||
|
and so integrates well into gevent's event loop.
|
||||||
|
|
||||||
|
In comparison to :class:`gevent.resolver_thread.Resolver` (which
|
||||||
|
delegates to the native system resolver), the implementation is
|
||||||
|
much more complex. In addition, there have been reports of it not
|
||||||
|
properly honoring certain system configurations (for example, the
|
||||||
|
order in which IPv4 and IPv6 results are returned may not match
|
||||||
|
the threaded resolver). However, because it does not use threads,
|
||||||
|
it may scale better for applications that make many lookups.
|
||||||
|
|
||||||
|
There are some known differences from the system resolver:
|
||||||
|
|
||||||
|
- ``gethostbyname_ex`` and ``gethostbyaddr`` may return different
|
||||||
|
for the ``aliaslist`` tuple member. (Sometimes the same,
|
||||||
|
sometimes in a different order, sometimes a different alias
|
||||||
|
altogether.)
|
||||||
|
- ``gethostbyname_ex`` may return the ``ipaddrlist`` in a different order.
|
||||||
|
- ``getaddrinfo`` does not return ``SOCK_RAW`` results.
|
||||||
|
- ``getaddrinfo`` may return results in a different order.
|
||||||
|
- Handling of ``.local`` (mDNS) names may be different, even if they are listed in
|
||||||
|
the hosts file.
|
||||||
|
- c-ares will not resolve ``broadcasthost``, even if listed in the hosts file.
|
||||||
|
- This implementation may raise ``gaierror(4)`` where the system implementation would raise
|
||||||
|
``herror(1)``.
|
||||||
|
- The results for ``localhost`` may be different. In particular, some system
|
||||||
|
resolvers will return more results from ``getaddrinfo`` than c-ares does,
|
||||||
|
such as SOCK_DGRAM results, and c-ares may report more ips on a multi-homed
|
||||||
|
host.
|
||||||
|
|
||||||
|
.. caution:: This module is considered extremely experimental on PyPy, and
|
||||||
|
due to its implementation in cython, it may be slower. It may also lead to
|
||||||
|
interpreter crashes.
|
||||||
|
|
||||||
|
.. _c-ares: http://c-ares.haxx.se
|
||||||
|
"""
|
||||||
|
|
||||||
|
ares_class = channel
|
||||||
|
|
||||||
|
def __init__(self, hub=None, use_environ=True, **kwargs):
    """Create a resolver bound to *hub* (the current hub by default).

    When *use_environ* is true, every ``GEVENTARES_<NAME>`` environment
    variable supplies a default value for the ``<name>`` keyword argument
    of the underlying ares channel.
    """
    self.hub = get_hub() if hub is None else hub
    if use_environ:
        prefix = 'GEVENTARES_'
        for key, value in os.environ.items():
            if not key.startswith(prefix):
                continue
            option = key[len(prefix):].lower()
            if option:
                # Explicit kwargs win over environment defaults.
                kwargs.setdefault(option, value)
    self.ares = self.ares_class(self.hub.loop, **kwargs)
    self.pid = os.getpid()
    # Saved so _on_fork can rebuild an equivalent channel in the child.
    self.params = kwargs
    # ref=False: the fork watcher must not keep the loop alive by itself.
    self.fork_watcher = self.hub.loop.fork(ref=False)
    self.fork_watcher.start(self._on_fork)
|
||||||
|
|
||||||
|
def __repr__(self):
    """Debug representation including the wrapped ares channel."""
    return '<gevent.resolver_ares.Resolver at 0x%x ares=%r>' % (
        id(self), self.ares)
|
||||||
|
|
||||||
|
def _on_fork(self):
    """Rebuild the ares channel after a fork.

    A c-ares channel cannot be shared across ``fork()``, so when the
    recorded pid no longer matches the current process we schedule the
    old channel for destruction and construct a fresh one with the
    saved parameters. (NOTE: see the comment in gevent.hub.reinit.)
    """
    current_pid = os.getpid()
    if current_pid == self.pid:
        return
    self.hub.loop.run_callback(self.ares.destroy)
    self.ares = self.ares_class(self.hub.loop, **self.params)
    self.pid = current_pid
|
||||||
|
|
||||||
|
def close(self):
    """Asynchronously destroy the channel and stop the fork watcher.

    Safe to call more than once; the channel is only destroyed the
    first time.
    """
    channel, self.ares = self.ares, None
    if channel is not None:
        # Destruction happens on the loop, not synchronously here.
        self.hub.loop.run_callback(channel.destroy)
    self.fork_watcher.stop()
|
||||||
|
|
||||||
|
def gethostbyname(self, hostname, family=AF_INET):
    """Return a single address string for *hostname*.

    The wildcard name ``''`` is first mapped to a concrete address via
    ``_resolve_special``; the result is the first entry of the address
    list from :meth:`gethostbyname_ex`.
    """
    resolved = _resolve_special(hostname, family)
    return self.gethostbyname_ex(resolved, family)[-1][0]
|
||||||
|
|
||||||
|
def gethostbyname_ex(self, hostname, family=AF_INET):
    """Forward-resolve *hostname*, returning ``(name, aliaslist, ipaddrlist)``.

    Text hostnames are encoded (IDNA on Python 3, ASCII on Python 2)
    before being handed to c-ares. The lookup is retried whenever the
    channel was replaced mid-call (i.e. the process forked).
    """
    if PY3:
        if isinstance(hostname, str):
            hostname = hostname.encode('idna')
        elif not isinstance(hostname, (bytes, bytearray)):
            # NOTE(review): the message text 'es(idna)' appears garbled
            # upstream — presumably meant 'bytes(idna)'; verify before changing.
            raise TypeError('Expected es(idna), not %s' % type(hostname).__name__)
    else:
        if isinstance(hostname, text_type):
            hostname = hostname.encode('ascii')
        elif not isinstance(hostname, str):
            raise TypeError('Expected string, not %s' % type(hostname).__name__)

    while True:
        # Snapshot the channel so we can detect a fork-triggered swap.
        ares = self.ares
        try:
            waiter = Waiter(self.hub)
            ares.gethostbyname(waiter, hostname, family)
            result = waiter.get()
            if not result[-1]:
                # Empty address list: report it the way getaddrinfo would.
                raise gaierror(-5, 'No address associated with hostname')
            return result
        except gaierror:
            if ares is self.ares:
                if hostname == b'255.255.255.255':
                    # The stdlib handles this case in 2.7 and 3.x, but ares does not.
                    # It is tested by test_socket.py in 3.4.
                    # HACK: So hardcode the expected return.
                    return ('255.255.255.255', [], ['255.255.255.255'])
                raise
            # "self.ares is not ares" means channel was destroyed (because we were forked)
|
||||||
|
|
||||||
|
def _lookup_port(self, port, socktype):
    """Normalize *port* to an int and infer candidate socket types.

    *port* may be an int, ``None`` (treated as 0), a numeric string, or
    a service name looked up via ``getservbyname``. Returns
    ``(port, socktypes)`` where *socktypes* lists the socket types the
    service name resolved under (empty if *socktype* already pinned it
    or the port was numeric).

    Raises ``gaierror(EAI_SERVICE, ...)`` for unknown services and
    ``error`` for non-int/str inputs.
    """
    # pylint:disable=too-many-branches
    socktypes = []
    if isinstance(port, string_types):
        try:
            port = int(port)
        except ValueError:
            # Not numeric: treat it as a service name.
            try:
                if socktype == 0:
                    # Caller didn't pin a type: try tcp first, fall back
                    # to udp; if tcp worked, also check whether the udp
                    # service maps to the same port and offer both.
                    origport = port
                    try:
                        port = getservbyname(port, 'tcp')
                        socktypes.append(SOCK_STREAM)
                    except error:
                        port = getservbyname(port, 'udp')
                        socktypes.append(SOCK_DGRAM)
                    else:
                        try:
                            if port == getservbyname(origport, 'udp'):
                                socktypes.append(SOCK_DGRAM)
                        except error:
                            pass
                elif socktype == SOCK_STREAM:
                    port = getservbyname(port, 'tcp')
                elif socktype == SOCK_DGRAM:
                    port = getservbyname(port, 'udp')
                else:
                    raise gaierror(EAI_SERVICE, 'Servname not supported for ai_socktype')
            except error as ex:
                if 'not found' in str(ex):
                    # Unknown service name → same error getaddrinfo gives.
                    raise gaierror(EAI_SERVICE, 'Servname not supported for ai_socktype')
                else:
                    raise gaierror(str(ex))
            except UnicodeEncodeError:
                raise error('Int or String expected')
    elif port is None:
        port = 0
    elif isinstance(port, integer_types):
        pass
    else:
        raise error('Int or String expected', port, type(port))
    # Clamp into the valid 16-bit port range.
    port = int(port % 65536)
    if not socktypes and socktype:
        socktypes.append(socktype)
    return port, socktypes
|
||||||
|
|
||||||
|
def _getaddrinfo(self, host, port, family=0, socktype=0, proto=0, flags=0):
    """One-shot ``getaddrinfo`` implementation on top of c-ares.

    Falls back to the native ``getaddrinfo`` for cases needing no
    network access (``None`` host, non-string host, AI_NUMERICHOST).
    Returns the usual list of ``(family, socktype, proto, canonname,
    sockaddr)`` 5-tuples; raises ``gaierror`` when nothing resolves.
    """
    # pylint:disable=too-many-locals,too-many-branches
    if isinstance(host, text_type):
        host = host.encode('idna')
    elif not isinstance(host, str) or (flags & AI_NUMERICHOST):
        # this handles cases which do not require network access
        # 1) host is None
        # 2) host is of an invalid type
        # 3) AI_NUMERICHOST flag is set
        return getaddrinfo(host, port, family, socktype, proto, flags)
        # we also call _socket.getaddrinfo below if family is not one of AF_*

    port, socktypes = self._lookup_port(port, socktype)

    # Candidate (socktype, proto) pairs, filtered down by what the
    # caller asked for.
    socktype_proto = [(SOCK_STREAM, 6), (SOCK_DGRAM, 17), (SOCK_RAW, 0)]
    if socktypes:
        socktype_proto = [(x, y) for (x, y) in socktype_proto if x in socktypes]
    if proto:
        socktype_proto = [(x, y) for (x, y) in socktype_proto if proto == y]

    ares = self.ares

    # Fan out the lookups; Values collects the callbacks and only
    # raises if every request failed.
    if family == AF_UNSPEC:
        ares_values = Values(self.hub, 2)
        ares.gethostbyname(ares_values, host, AF_INET)
        ares.gethostbyname(ares_values, host, AF_INET6)
    elif family == AF_INET:
        ares_values = Values(self.hub, 1)
        ares.gethostbyname(ares_values, host, AF_INET)
    elif family == AF_INET6:
        ares_values = Values(self.hub, 1)
        ares.gethostbyname(ares_values, host, AF_INET6)
    else:
        raise gaierror(5, 'ai_family not supported: %r' % (family, ))

    values = ares_values.get()
    # Drop an exact duplicate answer (can happen with AF_UNSPEC).
    if len(values) == 2 and values[0] == values[1]:
        values.pop()

    result = []
    result4 = []
    result6 = []

    # Expand each resolved address into one tuple per (socktype, proto).
    for addrs in values:
        if addrs.family == AF_INET:
            for addr in addrs[-1]:
                sockaddr = (addr, port)
                for socktype4, proto4 in socktype_proto:
                    result4.append((AF_INET, socktype4, proto4, '', sockaddr))
        elif addrs.family == AF_INET6:
            for addr in addrs[-1]:
                if addr == '::1':
                    # IPv6 loopback goes to the front of the list.
                    dest = result
                else:
                    dest = result6
                sockaddr = (addr, port, 0, 0)
                for socktype6, proto6 in socktype_proto:
                    dest.append((AF_INET6, socktype6, proto6, '', sockaddr))

    # As of 2016, some platforms return IPV6 first and some do IPV4 first,
    # and some might even allow configuration of which is which. For backwards
    # compatibility with earlier releases (but not necessarily resolver_thread!)
    # we return 4 first. See https://github.com/gevent/gevent/issues/815 for more.
    result += result4 + result6

    if not result:
        raise gaierror(-5, 'No address associated with hostname')

    return result
|
||||||
|
|
||||||
|
def getaddrinfo(self, host, port, family=0, socktype=0, proto=0, flags=0):
    """Resolve *host*/*port*, retrying if a fork replaced the channel.

    A ``gaierror`` propagates only when ``self.ares`` is still the
    channel the lookup started on; if the channel changed during the
    call (the process forked), the lookup is silently retried.
    """
    while True:
        channel = self.ares
        try:
            return self._getaddrinfo(host, port, family, socktype, proto, flags)
        except gaierror:
            if channel is self.ares:
                raise
            # else: channel was destroyed by a fork — retry on the new one
|
||||||
|
|
||||||
|
def _gethostbyaddr(self, ip_address):
    """One-shot reverse lookup of *ip_address* via c-ares.

    If c-ares rejects the input as not being a literal IP
    (``InvalidIP``), the name is first forward-resolved with
    ``_getaddrinfo`` and the reverse lookup retried on the resulting
    address — matching stdlib ``gethostbyaddr`` accepting hostnames.
    """
    if PY3:
        if isinstance(ip_address, str):
            ip_address = ip_address.encode('idna')
        elif not isinstance(ip_address, (bytes, bytearray)):
            # NOTE(review): 'es(idna)' looks like a garbled upstream
            # message (presumably 'bytes(idna)'); left as-is.
            raise TypeError('Expected es(idna), not %s' % type(ip_address).__name__)
    else:
        if isinstance(ip_address, text_type):
            ip_address = ip_address.encode('ascii')
        elif not isinstance(ip_address, str):
            raise TypeError('Expected string, not %s' % type(ip_address).__name__)

    waiter = Waiter(self.hub)
    try:
        self.ares.gethostbyaddr(waiter, ip_address)
        return waiter.get()
    except InvalidIP:
        # Not a literal IP: forward-resolve it, then reverse-resolve
        # the first address we got back.
        result = self._getaddrinfo(ip_address, None, family=AF_UNSPEC, socktype=SOCK_DGRAM)
        if not result:
            raise
        _ip_address = result[0][-1][0]
        if isinstance(_ip_address, text_type):
            _ip_address = _ip_address.encode('ascii')
        if _ip_address == ip_address:
            # Forward resolution changed nothing; the original error stands.
            raise
        # Reuse the waiter for the second attempt.
        waiter.clear()
        self.ares.gethostbyaddr(waiter, _ip_address)
        return waiter.get()
|
||||||
|
|
||||||
|
def gethostbyaddr(self, ip_address):
    """Reverse-resolve *ip_address*, retrying if a fork swapped the channel."""
    ip_address = _resolve_special(ip_address, AF_UNSPEC)
    while True:
        channel = self.ares
        try:
            return self._gethostbyaddr(ip_address)
        except gaierror:
            if channel is self.ares:
                raise
            # channel changed mid-call (fork) — retry
|
||||||
|
|
||||||
|
def _getnameinfo(self, sockaddr, flags):
    """One-shot ``getnameinfo`` implementation.

    Validates *sockaddr* (a ``(host, port[, flowinfo, scopeid])``
    tuple), canonicalizes the address through ``_getaddrinfo``, and
    asks c-ares for the ``(node, service)`` pair. On Python 3 a
    missing service raises ``gaierror`` (errno 8) to match the stdlib;
    Python 2 substitutes ``'0'``.
    """
    if not isinstance(flags, int):
        raise TypeError('an integer is required')
    if not isinstance(sockaddr, tuple):
        raise TypeError('getnameinfo() argument 1 must be a tuple')

    address = sockaddr[0]
    if not PY3 and isinstance(address, text_type):
        address = address.encode('ascii')

    if not isinstance(address, string_types):
        raise TypeError('sockaddr[0] must be a string, not %s' % type(address).__name__)

    port = sockaddr[1]
    if not isinstance(port, int):
        raise TypeError('port must be an integer, not %s' % type(port))

    waiter = Waiter(self.hub)
    # Canonicalize through getaddrinfo; SOCK_DGRAM keeps it to one
    # entry per address family.
    result = self._getaddrinfo(address, str(sockaddr[1]), family=AF_UNSPEC, socktype=SOCK_DGRAM)
    if not result:
        # NOTE(review): reraise outside an except block relies on a
        # live sys.exc_info(); _getaddrinfo raising on empty results
        # presumably makes this branch unreachable — verify.
        reraise(*sys.exc_info())
    elif len(result) != 1:
        raise error('sockaddr resolved to multiple addresses')
    family, _socktype, _proto, _name, address = result[0]

    if family == AF_INET:
        if len(sockaddr) != 2:
            raise error("IPv4 sockaddr must be 2 tuple")
    elif family == AF_INET6:
        # Preserve the caller's flowinfo/scopeid on the resolved address.
        address = address[:2] + sockaddr[2:]

    self.ares.getnameinfo(waiter, address, flags)
    node, service = waiter.get()

    if service is None:
        if PY3:
            # ares docs: "If the query did not complete
            # successfully, or one of the values was not
            # requested, node or service will be NULL ". Python 2
            # allows that for the service, but Python 3 raises
            # an error. This is tested by test_socket in py 3.4
            err = gaierror('nodename nor servname provided, or not known')
            err.errno = 8
            raise err
        service = '0'
    return node, service
|
||||||
|
|
||||||
|
def getnameinfo(self, sockaddr, flags):
    """Public ``getnameinfo``: retries when a fork replaced the channel."""
    while True:
        channel = self.ares
        try:
            return self._getnameinfo(sockaddr, flags)
        except gaierror:
            if channel is self.ares:
                raise
            # fork replaced the channel — retry on the fresh one
|
||||||
|
|
||||||
|
|
||||||
|
class Values(object):
    """Collect the results of several concurrent ares lookups.

    Instances are used as the callback for multiple ``gethostbyname``
    requests; errors are ignored unless *every* request failed, in
    which case the last recorded error is raised from :meth:`get`.
    """
    # QQQ could probably be moved somewhere - hub.py?

    __slots__ = ['count', 'values', 'error', 'waiter']

    def __init__(self, hub, count):
        # Number of callbacks still outstanding.
        self.count = count
        # Successful results, in completion order.
        self.values = []
        # Most recent exception seen, if any.
        self.error = None
        self.waiter = Waiter(hub)

    def __call__(self, source):
        """Record one completed request; wake the waiter when all are done."""
        self.count -= 1
        if source.exception is None:
            self.values.append(source.value)
        else:
            self.error = source.exception
        if self.count <= 0:
            self.waiter.switch()

    def get(self):
        """Block until all requests finish; return successes or raise.

        Raises the recorded error only when no request succeeded.
        """
        self.waiter.get()
        if self.values:
            return self.values
        # BUG FIX: this previously did ``assert error is not None``,
        # which tested the module-level ``error`` *class* (always true)
        # instead of ``self.error`` — and disappeared entirely under
        # ``python -O``. Check the instance attribute explicitly.
        if self.error is None:
            raise AssertionError('Values.get: no values and no error recorded')
        raise self.error  # pylint:disable=raising-bad-type
|
||||||
|
|
||||||
|
|
||||||
|
def _resolve_special(hostname, family):
|
||||||
|
if hostname == '':
|
||||||
|
result = getaddrinfo(None, 0, family, SOCK_DGRAM, 0, AI_PASSIVE)
|
||||||
|
if len(result) != 1:
|
||||||
|
raise error('wildcard resolved to multiple address')
|
||||||
|
return result[0][4][0]
|
||||||
|
return hostname
|
||||||
71
python/gevent/resolver_thread.py
Normal file
71
python/gevent/resolver_thread.py
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Copyright (c) 2012-2015 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Native thread-based hostname resolver.
|
||||||
|
"""
|
||||||
|
import _socket
|
||||||
|
from gevent._compat import text_type
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['Resolver']
|
||||||
|
|
||||||
|
|
||||||
|
# trigger import of encodings.idna to avoid https://github.com/gevent/gevent/issues/349
|
||||||
|
text_type('foo').encode('idna')
|
||||||
|
|
||||||
|
|
||||||
|
class Resolver(object):
    """
    Implementation of the resolver API using native threads and native resolution
    functions.

    Using the native resolution mechanisms ensures the highest
    compatibility with what a non-gevent program would return
    including good support for platform specific configuration
    mechanisms. The use of native (non-greenlet) threads ensures that
    a caller doesn't block other greenlets.

    This implementation also has the benefit of being very simple in comparison to
    :class:`gevent.resolver_ares.Resolver`.

    .. tip::

        Most users find this resolver to be quite reliable in a
        properly monkey-patched environment. However, there have been
        some reports of long delays, slow performance or even hangs,
        particularly in long-lived programs that make many, many DNS
        requests. If you suspect that may be happening to you, try the
        ares resolver (and submit a bug report).
    """
    def __init__(self, hub=None):
        # Lookups are delegated to the hub's shared thread pool.
        if hub is None:
            hub = get_hub()
        self.pool = hub.threadpool
        if _socket.gaierror not in hub.NOT_ERROR:
            # Do not cause lookup failures to get printed by the default
            # error handler. This can be very noisy.
            hub.NOT_ERROR += (_socket.gaierror, _socket.herror)

    def __repr__(self):
        return '<gevent.resolver_thread.Resolver at 0x%x pool=%r>' % (id(self), self.pool)

    def close(self):
        # Nothing to release: the thread pool is owned by the hub.
        pass

    # from briefly reading socketmodule.c, it seems that all of the functions
    # below are thread-safe in Python, even if they are not thread-safe in C.

    def gethostbyname(self, *args):
        return self.pool.apply(_socket.gethostbyname, args)

    def gethostbyname_ex(self, *args):
        return self.pool.apply(_socket.gethostbyname_ex, args)

    def getaddrinfo(self, *args, **kwargs):
        return self.pool.apply(_socket.getaddrinfo, args, kwargs)

    def gethostbyaddr(self, *args, **kwargs):
        return self.pool.apply(_socket.gethostbyaddr, args, kwargs)

    def getnameinfo(self, *args, **kwargs):
        return self.pool.apply(_socket.getnameinfo, args, kwargs)
|
||||||
244
python/gevent/select.py
Normal file
244
python/gevent/select.py
Normal file
@@ -0,0 +1,244 @@
|
|||||||
|
# Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Waiting for I/O completion.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from gevent.event import Event
|
||||||
|
from gevent.hub import get_hub
|
||||||
|
from gevent.hub import sleep as _g_sleep
|
||||||
|
from gevent._compat import integer_types
|
||||||
|
from gevent._compat import iteritems
|
||||||
|
from gevent._compat import itervalues
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
from gevent._util import _NONE
|
||||||
|
|
||||||
|
from errno import EINTR
|
||||||
|
if sys.platform.startswith('win32'):
|
||||||
|
def _original_select(_r, _w, _x, _t):
|
||||||
|
# windows cant handle three empty lists, but we've always
|
||||||
|
# accepted that, so don't try the compliance check on windows
|
||||||
|
return ((), (), ())
|
||||||
|
else:
|
||||||
|
from select import select as _original_select
|
||||||
|
|
||||||
|
try:
|
||||||
|
from select import poll as original_poll
|
||||||
|
from select import POLLIN, POLLOUT, POLLNVAL
|
||||||
|
__implements__ = ['select', 'poll']
|
||||||
|
except ImportError:
|
||||||
|
original_poll = None
|
||||||
|
__implements__ = ['select']
|
||||||
|
|
||||||
|
__all__ = ['error'] + __implements__
|
||||||
|
|
||||||
|
import select as __select__
|
||||||
|
|
||||||
|
error = __select__.error
|
||||||
|
|
||||||
|
__imports__ = copy_globals(__select__, globals(),
|
||||||
|
names_to_ignore=__all__,
|
||||||
|
dunder_names_to_keep=())
|
||||||
|
|
||||||
|
_EV_READ = 1
|
||||||
|
_EV_WRITE = 2
|
||||||
|
|
||||||
|
def get_fileno(obj):
|
||||||
|
try:
|
||||||
|
fileno_f = obj.fileno
|
||||||
|
except AttributeError:
|
||||||
|
if not isinstance(obj, integer_types):
|
||||||
|
raise TypeError('argument must be an int, or have a fileno() method: %r' % (obj,))
|
||||||
|
return obj
|
||||||
|
else:
|
||||||
|
return fileno_f()
|
||||||
|
|
||||||
|
|
||||||
|
class SelectResult(object):
    """Accumulates ready read/write objects for the green ``select``.

    ``add_read``/``add_write`` are used directly as io-watcher
    callbacks; the ``.event`` attribute stamped onto each method tells
    ``__add_watchers`` which libev event mask to watch for.
    """
    __slots__ = ('read', 'write', 'event')

    def __init__(self):
        self.read = []
        self.write = []
        # Set as soon as the first fd becomes ready.
        self.event = Event()

    def add_read(self, socket):
        self.read.append(socket)
        self.event.set()

    add_read.event = _EV_READ

    def add_write(self, socket):
        self.write.append(socket)
        self.event.set()

    add_write.event = _EV_WRITE

    def __add_watchers(self, watchers, fdlist, callback, io, pri):
        # One io watcher per fd; callback.event selects read vs write.
        for fd in fdlist:
            watcher = io(get_fileno(fd), callback.event)
            watcher.priority = pri
            watchers.append(watcher)
            watcher.start(callback, fd)

    def _make_watchers(self, watchers, rlist, wlist):
        loop = get_hub().loop
        io = loop.io
        MAXPRI = loop.MAXPRI

        try:
            self.__add_watchers(watchers, rlist, self.add_read, io, MAXPRI)
            self.__add_watchers(watchers, wlist, self.add_write, io, MAXPRI)
        except IOError as ex:
            # Present watcher-creation failures as select.error.
            raise error(*ex.args)

    def _closeall(self, watchers):
        for watcher in watchers:
            watcher.stop()
        del watchers[:]

    def select(self, rlist, wlist, timeout):
        """Wait up to *timeout* for readiness; returns (read, write, [])."""
        watchers = []
        try:
            self._make_watchers(watchers, rlist, wlist)
            self.event.wait(timeout=timeout)
            return self.read, self.write, []
        finally:
            # Always tear the watchers down, even on error/timeout.
            self._closeall(watchers)
|
||||||
|
|
||||||
|
|
||||||
|
def select(rlist, wlist, xlist, timeout=None): # pylint:disable=unused-argument
    """An implementation of :meth:`select.select` that blocks only the current greenlet.

    .. caution:: *xlist* is ignored.

    .. versionchanged:: 1.2a1
       Raise a :exc:`ValueError` if timeout is negative. This matches Python 3's
       behaviour (Python 2 would raise a ``select.error``). Previously gevent had
       undefined behaviour.
    .. versionchanged:: 1.2a1
       Raise an exception if any of the file descriptors are invalid.
    """
    if timeout is not None and timeout < 0:
        # Raise an error like the real implementation; which error
        # depends on the version. Python 3, where select.error is OSError,
        # raises a ValueError (which makes sense). Older pythons raise
        # the error from the select syscall...but we don't actually get there.
        # We choose to just raise the ValueError as it makes more sense and is
        # forward compatible
        raise ValueError("timeout must be non-negative")

    # First, do a poll with the original select system call. This
    # is the most efficient way to check to see if any of the file descriptors
    # have previously been closed and raise the correct corresponding exception.
    # (Because libev tends to just return them as ready...)
    # We accept the *xlist* here even though we can't below because this is all about
    # error handling.
    sel_results = ((), (), ())
    try:
        sel_results = _original_select(rlist, wlist, xlist, 0)
    except error as e:
        enumber = getattr(e, 'errno', None) or e.args[0]
        if enumber != EINTR:
            # Only EINTR (interrupted syscall) is ignored; anything else
            # (e.g. a closed fd) propagates to the caller.
            raise

    if sel_results[0] or sel_results[1] or sel_results[2]:
        # If we actually had stuff ready, go ahead and return it. No need
        # to go through the trouble of doing our own stuff.
        # However, because this is typically a place where scheduling switches
        # can occur, we need to make sure that's still the case; otherwise a single
        # consumer could monopolize the thread. (shows up in test_ftplib.)
        _g_sleep()
        return sel_results

    # Nothing immediately ready: fall back to libev io watchers.
    result = SelectResult()
    return result.select(rlist, wlist, timeout)
|
||||||
|
|
||||||
|
|
||||||
|
if original_poll is not None:
    class PollResult(object):
        """Accumulates ``(fd, eventmask)`` pairs for :meth:`poll.poll`."""
        __slots__ = ('events', 'event')

        def __init__(self):
            self.events = set()
            self.event = Event()

        def add_event(self, events, fd):
            # Watcher callback (pass_events=True): translate libev event
            # bits into poll() flags. Negative events signal an invalid fd.
            if events < 0:
                result_flags = POLLNVAL
            else:
                result_flags = 0
                if events & _EV_READ:
                    result_flags = POLLIN
                if events & _EV_WRITE:
                    result_flags |= POLLOUT

            self.events.add((fd, result_flags))
            self.event.set()

    class poll(object):
        """
        An implementation of :class:`select.poll` that blocks only the current greenlet.

        .. caution:: ``POLLPRI`` data is not supported.

        .. versionadded:: 1.1b1
        """
        def __init__(self):
            self.fds = {} # {int -> watcher}
            self.loop = get_hub().loop

        def register(self, fd, eventmask=_NONE):
            # Register (or re-register) *fd*; with no mask, watch both
            # read and write like the stdlib default.
            if eventmask is _NONE:
                flags = _EV_READ | _EV_WRITE
            else:
                flags = 0
                if eventmask & POLLIN:
                    flags = _EV_READ
                if eventmask & POLLOUT:
                    flags |= _EV_WRITE
                # If they ask for POLLPRI, we can't support
                # that. Should we raise an error?

            fileno = get_fileno(fd)
            watcher = self.loop.io(fileno, flags)
            watcher.priority = self.loop.MAXPRI
            self.fds[fileno] = watcher

        def modify(self, fd, eventmask):
            # Replacing the watcher wholesale has the same effect.
            self.register(fd, eventmask)

        def poll(self, timeout=None):
            """
            poll the registered fds.

            .. versionchanged:: 1.2a1
               File descriptors that are closed are reported with POLLNVAL.
            """
            result = PollResult()
            try:
                for fd, watcher in iteritems(self.fds):
                    watcher.start(result.add_event, fd, pass_events=True)
                if timeout is not None and timeout > -1:
                    # stdlib poll takes milliseconds; Event.wait takes seconds.
                    timeout /= 1000.0
                result.event.wait(timeout=timeout)
                return list(result.events)
            finally:
                # Stop every watcher whether or not anything fired.
                for awatcher in itervalues(self.fds):
                    awatcher.stop()

        def unregister(self, fd):
            """
            Unregister the *fd*.

            .. versionchanged:: 1.2a1
               Raise a `KeyError` if *fd* was not registered, like the standard
               library. Previously gevent did nothing.
            """
            fileno = get_fileno(fd)
            del self.fds[fileno]

    # Only needed to build the classes above; keep the namespace clean.
    del original_poll
|
||||||
255
python/gevent/server.py
Normal file
255
python/gevent/server.py
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
# Copyright (c) 2009-2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""TCP/SSL server"""
|
||||||
|
import sys
|
||||||
|
import _socket
|
||||||
|
from gevent.baseserver import BaseServer
|
||||||
|
from gevent.socket import EWOULDBLOCK, socket
|
||||||
|
from gevent._compat import PYPY, PY3
|
||||||
|
|
||||||
|
__all__ = ['StreamServer', 'DatagramServer']
|
||||||
|
|
||||||
|
|
||||||
|
if sys.platform == 'win32':
|
||||||
|
# SO_REUSEADDR on Windows does not mean the same thing as on *nix (issue #217)
|
||||||
|
DEFAULT_REUSE_ADDR = None
|
||||||
|
else:
|
||||||
|
DEFAULT_REUSE_ADDR = 1
|
||||||
|
|
||||||
|
|
||||||
|
class StreamServer(BaseServer):
|
||||||
|
"""
|
||||||
|
A generic TCP server.
|
||||||
|
|
||||||
|
Accepts connections on a listening socket and spawns user-provided
|
||||||
|
*handle* function for each connection with 2 arguments: the client
|
||||||
|
socket and the client address.
|
||||||
|
|
||||||
|
Note that although the errors in a successfully spawned handler
|
||||||
|
will not affect the server or other connections, the errors raised
|
||||||
|
by :func:`accept` and *spawn* cause the server to stop accepting
|
||||||
|
for a short amount of time. The exact period depends on the values
|
||||||
|
of :attr:`min_delay` and :attr:`max_delay` attributes.
|
||||||
|
|
||||||
|
The delay starts with :attr:`min_delay` and doubles with each
|
||||||
|
successive error until it reaches :attr:`max_delay`. A successful
|
||||||
|
:func:`accept` resets the delay to :attr:`min_delay` again.
|
||||||
|
|
||||||
|
See :class:`~gevent.baseserver.BaseServer` for information on defining the *handle*
|
||||||
|
function and important restrictions on it.
|
||||||
|
|
||||||
|
**SSL Support**
|
||||||
|
|
||||||
|
The server can optionally work in SSL mode when given the correct
|
||||||
|
keyword arguments. (That is, the presence of any keyword arguments
|
||||||
|
will trigger SSL mode.) On Python 2.7.9 and later (any Python
|
||||||
|
version that supports the :class:`ssl.SSLContext`), this can be
|
||||||
|
done with a configured ``SSLContext``. On any Python version, it
|
||||||
|
can be done by passing the appropriate arguments for
|
||||||
|
:func:`ssl.wrap_socket`.
|
||||||
|
|
||||||
|
The incoming socket will be wrapped into an SSL socket before
|
||||||
|
being passed to the *handle* function.
|
||||||
|
|
||||||
|
If the *ssl_context* keyword argument is present, it should
|
||||||
|
contain an :class:`ssl.SSLContext`. The remaining keyword
|
||||||
|
arguments are passed to the :meth:`ssl.SSLContext.wrap_socket`
|
||||||
|
method of that object. Depending on the Python version, supported arguments
|
||||||
|
may include:
|
||||||
|
|
||||||
|
- server_hostname
|
||||||
|
- suppress_ragged_eofs
|
||||||
|
- do_handshake_on_connect
|
||||||
|
|
||||||
|
.. caution:: When using an SSLContext, it should either be
|
||||||
|
imported from :mod:`gevent.ssl`, or the process needs to be monkey-patched.
|
||||||
|
If the process is not monkey-patched and you pass the standard library
|
||||||
|
SSLContext, the resulting client sockets will not cooperate with gevent.
|
||||||
|
|
||||||
|
Otherwise, keyword arguments are assumed to apply to :func:`ssl.wrap_socket`.
|
||||||
|
These keyword arguments bay include:
|
||||||
|
|
||||||
|
- keyfile
|
||||||
|
- certfile
|
||||||
|
- cert_reqs
|
||||||
|
- ssl_version
|
||||||
|
- ca_certs
|
||||||
|
- suppress_ragged_eofs
|
||||||
|
- do_handshake_on_connect
|
||||||
|
- ciphers
|
||||||
|
|
||||||
|
.. versionchanged:: 1.2a2
|
||||||
|
Add support for the *ssl_context* keyword argument.
|
||||||
|
|
||||||
|
"""
|
||||||
|
# the default backlog to use if none was provided in __init__
|
||||||
|
backlog = 256
|
||||||
|
|
||||||
|
reuse_addr = DEFAULT_REUSE_ADDR
|
||||||
|
|
||||||
|
def __init__(self, listener, handle=None, backlog=None, spawn='default', **ssl_args):
|
||||||
|
BaseServer.__init__(self, listener, handle=handle, spawn=spawn)
|
||||||
|
try:
|
||||||
|
if ssl_args:
|
||||||
|
ssl_args.setdefault('server_side', True)
|
||||||
|
if 'ssl_context' in ssl_args:
|
||||||
|
ssl_context = ssl_args.pop('ssl_context')
|
||||||
|
self.wrap_socket = ssl_context.wrap_socket
|
||||||
|
self.ssl_args = ssl_args
|
||||||
|
else:
|
||||||
|
from gevent.ssl import wrap_socket
|
||||||
|
self.wrap_socket = wrap_socket
|
||||||
|
self.ssl_args = ssl_args
|
||||||
|
else:
|
||||||
|
self.ssl_args = None
|
||||||
|
if backlog is not None:
|
||||||
|
if hasattr(self, 'socket'):
|
||||||
|
raise TypeError('backlog must be None when a socket instance is passed')
|
||||||
|
self.backlog = backlog
|
||||||
|
except:
|
||||||
|
self.close()
|
||||||
|
raise
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ssl_enabled(self):
|
||||||
|
return self.ssl_args is not None
|
||||||
|
|
||||||
|
def set_listener(self, listener):
|
||||||
|
BaseServer.set_listener(self, listener)
|
||||||
|
try:
|
||||||
|
self.socket = self.socket._sock
|
||||||
|
except AttributeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def init_socket(self):
|
||||||
|
if not hasattr(self, 'socket'):
|
||||||
|
# FIXME: clean up the socket lifetime
|
||||||
|
# pylint:disable=attribute-defined-outside-init
|
||||||
|
self.socket = self.get_listener(self.address, self.backlog, self.family)
|
||||||
|
self.address = self.socket.getsockname()
|
||||||
|
if self.ssl_args:
|
||||||
|
self._handle = self.wrap_socket_and_handle
|
||||||
|
else:
|
||||||
|
self._handle = self.handle
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_listener(cls, address, backlog=None, family=None):
|
||||||
|
if backlog is None:
|
||||||
|
backlog = cls.backlog
|
||||||
|
return _tcp_listener(address, backlog=backlog, reuse_addr=cls.reuse_addr, family=family)
|
||||||
|
|
||||||
|
if PY3:
|
||||||
|
|
||||||
|
def do_read(self):
|
||||||
|
sock = self.socket
|
||||||
|
try:
|
||||||
|
fd, address = sock._accept()
|
||||||
|
except BlockingIOError: # python 2: pylint: disable=undefined-variable
|
||||||
|
if not sock.timeout:
|
||||||
|
return
|
||||||
|
raise
|
||||||
|
sock = socket(sock.family, sock.type, sock.proto, fileno=fd)
|
||||||
|
# XXX Python issue #7995?
|
||||||
|
return sock, address
|
||||||
|
|
||||||
|
else:
|
||||||
|
|
||||||
|
def do_read(self):
|
||||||
|
try:
|
||||||
|
client_socket, address = self.socket.accept()
|
||||||
|
except _socket.error as err:
|
||||||
|
if err.args[0] == EWOULDBLOCK:
|
||||||
|
return
|
||||||
|
raise
|
||||||
|
sockobj = socket(_sock=client_socket)
|
||||||
|
if PYPY:
|
||||||
|
client_socket._drop()
|
||||||
|
return sockobj, address
|
||||||
|
|
||||||
|
def do_close(self, sock, *args):
|
||||||
|
# pylint:disable=arguments-differ
|
||||||
|
sock.close()
|
||||||
|
|
||||||
|
def wrap_socket_and_handle(self, client_socket, address):
    """SSL variant of the per-connection handler.

    Wraps the freshly accepted *client_socket* with the stored SSL
    arguments before delegating to the regular ``handle``.
    """
    # used in case of ssl sockets
    ssl_socket = self.wrap_socket(client_socket, **self.ssl_args)
    return self.handle(ssl_socket, address)
||||||
|
class DatagramServer(BaseServer):
    """A UDP server"""

    # Whether SO_REUSEADDR is set on the socket by default.
    reuse_addr = DEFAULT_REUSE_ADDR

    def __init__(self, *args, **kwargs):
        """Accepts the same arguments as ``BaseServer``.

        Also creates a write lock so that concurrent greenlets
        serialize their :meth:`sendto` calls.
        """
        # The raw (non-gevent) socket, if possible
        self._socket = None
        BaseServer.__init__(self, *args, **kwargs)
        # Imported lazily to avoid an import cycle at module load time.
        from gevent.lock import Semaphore
        self._writelock = Semaphore()

    def init_socket(self):
        """Create and bind the UDP socket if one is not already set."""
        if not hasattr(self, 'socket'):
            # FIXME: clean up the socket lifetime
            # pylint:disable=attribute-defined-outside-init
            self.socket = self.get_listener(self.address, self.family)
            # Refresh the address in case the OS chose the port.
            self.address = self.socket.getsockname()
        self._socket = self.socket
        try:
            # Python 2 gevent sockets wrap the raw socket in ._sock.
            self._socket = self._socket._sock
        except AttributeError:
            pass

    @classmethod
    def get_listener(cls, address, family=None):
        """Return a bound UDP socket for *address*."""
        return _udp_socket(address, reuse_addr=cls.reuse_addr, family=family)

    def do_read(self):
        """Receive one datagram; return ``(data, address)`` or None.

        Returns None when the non-blocking read would block
        (EWOULDBLOCK). Reads at most 8192 bytes per datagram.
        """
        try:
            data, address = self._socket.recvfrom(8192)
        except _socket.error as err:
            if err.args[0] == EWOULDBLOCK:
                return
            raise
        return data, address

    def sendto(self, *args):
        """Serialized wrapper around ``socket.sendto``."""
        self._writelock.acquire()
        try:
            self.socket.sendto(*args)
        finally:
            self._writelock.release()
|
||||||
|
def _tcp_listener(address, backlog=50, reuse_addr=None, family=_socket.AF_INET):
    """Create a TCP socket, bind it to *address* and start listening.

    The returned socket is non-blocking. When the bind fails, the
    exception is re-raised with the target address appended to its
    ``strerror`` to make the failure easier to diagnose.
    """
    listener = socket(family=family)
    if reuse_addr is not None:
        listener.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, reuse_addr)
    try:
        listener.bind(address)
    except _socket.error as ex:
        message = getattr(ex, 'strerror', None)
        if message is not None:
            ex.strerror = message + ': ' + repr(address)
        raise
    listener.listen(backlog)
    listener.setblocking(0)
    return listener
|
||||||
|
def _udp_socket(address, backlog=50, reuse_addr=None, family=_socket.AF_INET):
    """Create a UDP socket bound to *address*.

    ``backlog`` exists only for signature compatibility with
    :func:`_tcp_listener` and is ignored.
    """
    # pylint:disable=unused-argument
    # we want gevent.socket.socket here
    udp_sock = socket(family=family, type=_socket.SOCK_DGRAM)
    if reuse_addr is not None:
        udp_sock.setsockopt(_socket.SOL_SOCKET, _socket.SO_REUSEADDR, reuse_addr)
    try:
        udp_sock.bind(address)
    except _socket.error as ex:
        original = getattr(ex, 'strerror', None)
        if original is not None:
            # Append the address to ease diagnosis, mirroring _tcp_listener.
            ex.strerror = original + ': ' + repr(address)
        raise
    return udp_sock
137
python/gevent/signal.py
Normal file
137
python/gevent/signal.py
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
"""
|
||||||
|
Cooperative implementation of special cases of :func:`signal.signal`.
|
||||||
|
|
||||||
|
This module is designed to work with libev's child watchers, as used
|
||||||
|
by default in :func:`gevent.os.fork` Note that each ``SIGCHLD`` handler
|
||||||
|
will be run in a new greenlet when the signal is delivered (just like
|
||||||
|
:class:`gevent.hub.signal`)
|
||||||
|
|
||||||
|
The implementations in this module are only monkey patched if
|
||||||
|
:func:`gevent.os.waitpid` is being used (the default) and if
|
||||||
|
:const:`signal.SIGCHLD` is available; see :func:`gevent.os.fork` for
|
||||||
|
information on configuring this not to be the case for advanced uses.
|
||||||
|
|
||||||
|
.. versionadded:: 1.1b4
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
from gevent._util import _NONE as _INITIAL
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
import signal as _signal
|
||||||
|
|
||||||
|
# Names this module provides as monkey-patch replacements / extensions.
__implements__ = []
__extensions__ = []


# The currently installed SIGCHLD handler; _INITIAL (a sentinel) until
# first queried or set.
_child_handler = _INITIAL

# Originals saved so our wrappers can delegate for non-SIGCHLD signals.
_signal_signal = _signal.signal
_signal_getsignal = _signal.getsignal
||||||
|
def getsignal(signalnum):
    """
    Exactly the same as :func:`signal.getsignal` except where
    :const:`signal.SIGCHLD` is concerned.

    For :const:`signal.SIGCHLD`, this cooperates with :func:`signal`
    to provide consistent answers.
    """
    if signalnum != _signal.SIGCHLD:
        return _signal_getsignal(signalnum)

    global _child_handler
    if _child_handler is _INITIAL:
        # Lazily capture whatever handler the stdlib believes is
        # installed, the first time SIGCHLD is queried.
        _child_handler = _signal_getsignal(_signal.SIGCHLD)

    return _child_handler
|
||||||
|
def signal(signalnum, handler):
    """
    Exactly the same as :func:`signal.signal` except where
    :const:`signal.SIGCHLD` is concerned.

    .. note::

       A :const:`signal.SIGCHLD` handler installed with this function
       will only be triggered for children that are forked using
       :func:`gevent.os.fork` (:func:`gevent.os.fork_and_watch`);
       children forked before monkey patching, or otherwise by the raw
       :func:`os.fork`, will not trigger the handler installed by this
       function. (It's unlikely that a SIGCHLD handler installed with
       the builtin :func:`signal.signal` would be triggered either;
       libev typically overwrites such a handler at the C level. At
       the very least, it's full of race conditions.)

    .. note::

        Use of ``SIG_IGN`` and ``SIG_DFL`` may also have race conditions
        with libev child watchers and the :mod:`gevent.subprocess` module.

    .. versionchanged:: 1.2a1
       If ``SIG_IGN`` or ``SIG_DFL`` are used to ignore ``SIGCHLD``, a
       future use of ``gevent.subprocess`` and libev child watchers
       will once again work. However, on Python 2, use of ``os.popen``
       will fail.

    .. versionchanged:: 1.1rc2
       Allow using ``SIG_IGN`` and ``SIG_DFL`` to reset and ignore ``SIGCHLD``.
       However, this allows the possibility of a race condition if ``gevent.subprocess``
       had already been used.
    """
    if signalnum != _signal.SIGCHLD:
        return _signal_signal(signalnum, handler)

    # TODO: raise value error if not called from the main
    # greenlet, just like threads

    if handler != _signal.SIG_IGN and handler != _signal.SIG_DFL and not callable(handler):
        # exact same error message raised by the stdlib
        raise TypeError("signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object")

    old_handler = getsignal(signalnum)
    global _child_handler
    _child_handler = handler
    if handler == _signal.SIG_IGN or handler == _signal.SIG_DFL:
        # Allow resetting/ignoring this signal at the process level.
        # Note that this conflicts with gevent.subprocess and other users
        # of child watchers, until the next time gevent.subprocess/loop.install_sigchld()
        # is called.
        from gevent import get_hub # Are we always safe to import here?
        _signal_signal(signalnum, handler)
        get_hub().loop.reset_sigchld()
    return old_handler
||||||
|
def _on_child_hook():
    """Dispatch the installed SIGCHLD handler when a child exits.

    Installed on ``gevent.os`` below; invoked from the hub's libev
    child watcher.
    """
    # This is called in the hub greenlet. To let the function
    # do more useful work, like use blocking functions,
    # we run it in a new greenlet; see gevent.hub.signal
    if callable(_child_handler):
        # None is a valid value for the frame argument
        from gevent import Greenlet
        greenlet = Greenlet(_child_handler, _signal.SIGCHLD, None)
        greenlet.switch()
||||||
|
import gevent.os

if 'waitpid' in gevent.os.__implements__ and hasattr(_signal, 'SIGCHLD'):
    # Tightly coupled here to gevent.os and its waitpid implementation; only use these
    # if necessary.
    gevent.os._on_child_hook = _on_child_hook
    __implements__.append("signal")
    __implements__.append("getsignal")
else:
    # XXX: This breaks test__all__ on windows
    __extensions__.append("signal")
    __extensions__.append("getsignal")

# Re-export everything else from the stdlib signal module unchanged.
__imports__ = copy_globals(_signal, globals(),
                           names_to_ignore=__implements__ + __extensions__,
                           dunder_names_to_keep=())

__all__ = __implements__ + __extensions__
||||||
106
python/gevent/socket.py
Normal file
106
python/gevent/socket.py
Normal file
@@ -0,0 +1,106 @@
|
|||||||
|
# Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
|
||||||
|
|
||||||
|
"""Cooperative low-level networking interface.
|
||||||
|
|
||||||
|
This module provides socket operations and some related functions.
|
||||||
|
The API of the functions and classes matches the API of the corresponding
|
||||||
|
items in the standard :mod:`socket` module exactly, but the synchronous functions
|
||||||
|
in this module only block the current greenlet and let the others run.
|
||||||
|
|
||||||
|
For convenience, exceptions (like :class:`error <socket.error>` and :class:`timeout <socket.timeout>`)
|
||||||
|
as well as the constants from the :mod:`socket` module are imported into this module.
|
||||||
|
"""
|
||||||
|
# Our import magic sadly makes this warning useless
|
||||||
|
# pylint: disable=undefined-variable
|
||||||
|
|
||||||
|
import sys
|
||||||
|
from gevent._compat import PY3
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
|
||||||
|
if PY3:
|
||||||
|
from gevent import _socket3 as _source # python 2: pylint:disable=no-name-in-module
|
||||||
|
else:
|
||||||
|
from gevent import _socket2 as _source
|
||||||
|
|
||||||
|
# define some things we're expecting to overwrite; each module
|
||||||
|
# needs to define these
|
||||||
|
__implements__ = __dns__ = __all__ = __extensions__ = __imports__ = ()
|
||||||
|
|
||||||
|
|
||||||
|
class error(Exception):
    """Placeholder socket error type, overwritten by copy_globals below."""

    # Mirrors socket.error's errno attribute; None until set on an instance.
    errno = None
||||||
|
|
||||||
|
def getfqdn(*args):
    """Placeholder, replaced by the real implementation via copy_globals."""
    # pylint:disable=unused-argument
    raise NotImplementedError()
|
||||||
|
copy_globals(_source, globals(),
             dunder_names_to_keep=('__implements__', '__dns__', '__all__',
                                   '__extensions__', '__imports__', '__socket__'),
             cleanup_globs=False)

# The _socket2 and _socket3 don't import things defined in
# __extensions__, to help avoid confusing reference cycles in the
# documentation and to prevent importing from the wrong place, but we
# *do* need to expose them here. (NOTE: This may lead to some sphinx
# warnings like:
#    WARNING: missing attribute mentioned in :members: or __all__:
#             module gevent._socket2, attribute cancel_wait
# These can be ignored.)
from gevent import _socketcommon
copy_globals(_socketcommon, globals(),
             only_names=_socketcommon.__extensions__)

try:
    _GLOBAL_DEFAULT_TIMEOUT = __socket__._GLOBAL_DEFAULT_TIMEOUT
except AttributeError:
    # Sentinel meaning "no timeout argument was passed"; deliberately
    # distinct from None (which means "no timeout").
    _GLOBAL_DEFAULT_TIMEOUT = object()
|
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.

    Raises the last connection error when every resolved address fails,
    or ``error`` when name resolution yields no addresses.
    """

    host, port = address
    # Remember the most recent failure so it can be re-raised if every
    # resolved address fails.
    err = None
    for res in getaddrinfo(host, port, 0 if has_ipv6 else AF_INET, SOCK_STREAM):
        af, socktype, proto, _, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except error as ex:
            # without exc_clear(), if connect() fails once, the socket is referenced by the frame in exc_info
            # and the next bind() fails (see test__socket.TestCreateConnection)
            # that does not happen with regular sockets though, because _socket.socket.connect() is a built-in.
            # this is similar to "getnameinfo loses a reference" failure in test_socket.py
            if not PY3:
                sys.exc_clear() # pylint:disable=no-member,useless-suppression
            if sock is not None:
                sock.close()
            err = ex
    if err is not None:
        raise err # pylint:disable=raising-bad-type
    else:
        raise error("getaddrinfo returns an empty list")

# This is promised to be in the __all__ of the _source, but, for circularity reasons,
# we implement it in this module. Mostly for documentation purposes, put it
# in the _source too.
_source.create_connection = create_connection
|
||||||
26
python/gevent/ssl.py
Normal file
26
python/gevent/ssl.py
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
"""
|
||||||
|
Secure Sockets Layer (SSL/TLS) module.
|
||||||
|
"""
|
||||||
|
from gevent._compat import PY2
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
|
||||||
|
# things we expect to override, here for static analysis
|
||||||
|
def wrap_socket(_sock, **_kwargs):
    """Placeholder, overwritten by ``copy_globals(_source, ...)`` below."""
    # pylint:disable=unused-argument
    raise NotImplementedError()
|
||||||
|
# Select the concrete SSL implementation module for this interpreter.
if PY2:
    if hasattr(__import__('ssl'), 'SSLContext'):
        # It's not sufficient to check for >= 2.7.9; some distributions
        # have backported most of PEP 466. Try to accommodate them. See Issue #702.
        # We're just about to import ssl anyway so it's fine to import it here, just
        # don't pollute the namespace
        from gevent import _sslgte279 as _source
    else:
        from gevent import _ssl2 as _source # pragma: no cover
else:
    # Py3
    from gevent import _ssl3 as _source # pragma: no cover


copy_globals(_source, globals())
||||||
1480
python/gevent/subprocess.py
Normal file
1480
python/gevent/subprocess.py
Normal file
File diff suppressed because it is too large
Load Diff
115
python/gevent/thread.py
Normal file
115
python/gevent/thread.py
Normal file
@@ -0,0 +1,115 @@
|
|||||||
|
"""
|
||||||
|
Implementation of the standard :mod:`thread` module that spawns greenlets.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
This module is a helper for :mod:`gevent.monkey` and is not
|
||||||
|
intended to be used directly. For spawning greenlets in your
|
||||||
|
applications, prefer higher level constructs like
|
||||||
|
:class:`gevent.Greenlet` class or :func:`gevent.spawn`.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__implements__ = ['allocate_lock',
|
||||||
|
'get_ident',
|
||||||
|
'exit',
|
||||||
|
'LockType',
|
||||||
|
'stack_size',
|
||||||
|
'start_new_thread',
|
||||||
|
'_local']
|
||||||
|
|
||||||
|
__imports__ = ['error']
|
||||||
|
if sys.version_info[0] <= 2:
|
||||||
|
import thread as __thread__ # pylint:disable=import-error
|
||||||
|
else:
|
||||||
|
import _thread as __thread__ # pylint:disable=import-error
|
||||||
|
__target__ = '_thread'
|
||||||
|
__imports__ += ['RLock',
|
||||||
|
'TIMEOUT_MAX',
|
||||||
|
'allocate',
|
||||||
|
'exit_thread',
|
||||||
|
'interrupt_main',
|
||||||
|
'start_new']
|
||||||
|
error = __thread__.error
|
||||||
|
from gevent._compat import PY3
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent._util import copy_globals
|
||||||
|
from gevent.hub import getcurrent, GreenletExit
|
||||||
|
from gevent.greenlet import Greenlet
|
||||||
|
from gevent.lock import BoundedSemaphore
|
||||||
|
from gevent.local import local as _local
|
||||||
|
|
||||||
|
|
||||||
|
def get_ident(gr=None):
    """Return the identity of *gr*, defaulting to the current greenlet.

    Greenlet identity is the object's ``id``, matching how thread
    idents are used as registry keys elsewhere in gevent.
    """
    target = getcurrent() if gr is None else gr
    return id(target)
||||||
|
|
||||||
|
def start_new_thread(function, args=(), kwargs=None):
    """Spawn a greenlet running *function* and return its ident.

    Greenlet-based replacement for ``thread.start_new_thread``.
    """
    if kwargs is not None:
        greenlet = Greenlet.spawn(function, *args, **kwargs)
    else:
        greenlet = Greenlet.spawn(function, *args)
    return get_ident(greenlet)
|
||||||
|
|
||||||
|
class LockType(BoundedSemaphore):
    """Greenlet-based replacement for ``thread.LockType``."""

    # Change the ValueError into the appropriate thread error
    # and any other API changes we need to make to match behaviour
    _OVER_RELEASE_ERROR = __thread__.error

    if PYPY and PY3:
        # NOTE(review): presumably PyPy3's _thread raises RuntimeError on
        # over-release rather than thread.error — confirm against PyPy.
        _OVER_RELEASE_ERROR = RuntimeError

    if PY3:
        _TIMEOUT_MAX = __thread__.TIMEOUT_MAX # python 2: pylint:disable=no-member

        def acquire(self, blocking=True, timeout=-1):
            """Acquire the lock, mirroring Python 3's ``_thread`` API.

            Raises the same ValueError/OverflowError combinations for
            invalid timeouts as the stdlib implementation.
            """
            # Transform the default -1 argument into the None that our
            # semaphore implementation expects, and raise the same error
            # the stdlib implementation does.
            if timeout == -1:
                timeout = None
            if not blocking and timeout is not None:
                raise ValueError("can't specify a timeout for a non-blocking call")
            if timeout is not None:
                if timeout < 0:
                    # in C: if(timeout < 0 && timeout != -1)
                    raise ValueError("timeout value must be strictly positive")
                if timeout > self._TIMEOUT_MAX:
                    raise OverflowError('timeout value is too large')

            return BoundedSemaphore.acquire(self, blocking, timeout)
||||||
|
# ``thread.allocate_lock`` is simply the lock type itself.
allocate_lock = LockType


def exit():
    """Greenlet analogue of ``thread.exit``: kill the calling greenlet."""
    raise GreenletExit
|
||||||
|
if hasattr(__thread__, 'stack_size'):
    _original_stack_size = __thread__.stack_size

    def stack_size(size=None):
        """Get, or only ever increase, the native thread stack size."""
        if size is None:
            return _original_stack_size()
        if size > _original_stack_size():
            return _original_stack_size(size)
        else:
            pass
            # not going to decrease stack_size, because otherwise other greenlets in this thread will suffer
else:
    # The platform's thread module has no stack_size; don't claim to
    # implement it.
    __implements__.remove('stack_size')
|
||||||
|
# Re-export the requested stdlib names that we don't replace.
__imports__ = copy_globals(__thread__, globals(),
                           only_names=__imports__,
                           ignore_missing_names=True)

__all__ = __implements__ + __imports__
# _local is provided for monkey-patching but is not public API here.
__all__.remove('_local')

# XXX interrupt_main
# XXX _count()
||||||
231
python/gevent/threading.py
Normal file
231
python/gevent/threading.py
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
"""
|
||||||
|
Implementation of the standard :mod:`threading` using greenlets.
|
||||||
|
|
||||||
|
.. note::
|
||||||
|
|
||||||
|
This module is a helper for :mod:`gevent.monkey` and is not
|
||||||
|
intended to be used directly. For spawning greenlets in your
|
||||||
|
applications, prefer higher level constructs like
|
||||||
|
:class:`gevent.Greenlet` class or :func:`gevent.spawn`.
|
||||||
|
"""
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
|
||||||
|
__implements__ = [
|
||||||
|
'local',
|
||||||
|
'_start_new_thread',
|
||||||
|
'_allocate_lock',
|
||||||
|
'Lock',
|
||||||
|
'_get_ident',
|
||||||
|
'_sleep',
|
||||||
|
'_DummyThread',
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
import threading as __threading__
|
||||||
|
_DummyThread_ = __threading__._DummyThread
|
||||||
|
from gevent.local import local
|
||||||
|
from gevent.thread import start_new_thread as _start_new_thread, allocate_lock as _allocate_lock, get_ident as _get_ident
|
||||||
|
from gevent._compat import PYPY
|
||||||
|
from gevent.hub import sleep as _sleep, getcurrent
|
||||||
|
|
||||||
|
# Exports, prevent unused import warnings
|
||||||
|
local = local
|
||||||
|
start_new_thread = _start_new_thread
|
||||||
|
allocate_lock = _allocate_lock
|
||||||
|
_get_ident = _get_ident
|
||||||
|
_sleep = _sleep
|
||||||
|
getcurrent = getcurrent
|
||||||
|
|
||||||
|
Lock = _allocate_lock
|
||||||
|
|
||||||
|
|
||||||
|
def _cleanup(g):
    """rawlink callback: drop the dying greenlet's entry from threading._active."""
    __threading__._active.pop(id(g), None)
|
||||||
|
def _make_cleanup_id(gid):
    """Build a weakref callback that drops *gid* from ``threading._active``."""
    def _drop_entry(_r):
        __threading__._active.pop(gid, None)
    return _drop_entry


# Lazily-imported weakref module (see _DummyThread.__init__).
_weakref = None
||||||
|
class _DummyThread(_DummyThread_):
    # We avoid calling the superclass constructor. This makes us about
    # twice as fast (1.16 vs 0.68usec on PyPy, 29.3 vs 17.7usec on
    # CPython 2.7), and has the important effect of avoiding
    # allocation and then immediate deletion of _Thread__block, a
    # lock. This is especially important on PyPy where locks go
    # through the cpyext API and Cython, which is known to be slow and
    # potentially buggy (e.g.,
    # https://bitbucket.org/pypy/pypy/issues/2149/memory-leak-for-python-subclass-of-cpyext#comment-22347393)

    # These objects are constructed quite frequently in some cases, so
    # the optimization matters: for example, in gunicorn, which uses
    # pywsgi.WSGIServer, every request is handled in a new greenlet,
    # and every request uses a logging.Logger to write the access log,
    # and every call to a log method captures the current thread (by
    # default).
    #
    # (Obviously we have to duplicate the effects of the constructor,
    # at least for external state purposes, which is potentially
    # slightly fragile.)

    # For the same reason, instances of this class will cleanup their own entry
    # in ``threading._active``

    # Capture the static things as class vars to save on memory/
    # construction time.
    # In Py2, they're all private; in Py3, they become protected
    _Thread__stopped = _is_stopped = _stopped = False
    _Thread__initialized = _initialized = True
    _Thread__daemonic = _daemonic = True
    _Thread__args = _args = ()
    _Thread__kwargs = _kwargs = None
    _Thread__target = _target = None
    # NOTE(review): the name-mangled form of Thread.__ident would be
    # ``_Thread__ident`` (double underscore); ``_Thread_ident`` looks like
    # a typo, though it appears harmless because __init__ calls
    # _set_ident(). TODO: confirm against the stdlib Thread attributes.
    _Thread_ident = _ident = None
    _Thread__started = _started = __threading__.Event()
    _Thread__started.set()
    _tstate_lock = None

    def __init__(self):
        #_DummyThread_.__init__(self) # pylint:disable=super-init-not-called

        # It'd be nice to use a pattern like "greenlet-%d", but maybe somebody out
        # there is checking thread names...
        self._name = self._Thread__name = __threading__._newname("DummyThread-%d")
        self._set_ident()

        g = getcurrent()
        gid = _get_ident(g) # same as id(g)
        __threading__._active[gid] = self
        rawlink = getattr(g, 'rawlink', None)
        if rawlink is not None:
            # raw greenlet.greenlet greenlets don't
            # have rawlink...
            rawlink(_cleanup)
        else:
            # ... so for them we use weakrefs.
            # See https://github.com/gevent/gevent/issues/918
            global _weakref
            if _weakref is None:
                _weakref = __import__('weakref')
            ref = _weakref.ref(g, _make_cleanup_id(gid))
            self.__raw_ref = ref

    def _Thread__stop(self):
        # Dummy threads are never "stopped"; make this a no-op.
        pass

    _stop = _Thread__stop # py3

    def _wait_for_tstate_lock(self, *args, **kwargs):
        # pylint:disable=arguments-differ
        # There is no C-level tstate lock for a greenlet; nothing to wait on.
        pass
|
||||||
|
if hasattr(__threading__, 'main_thread'): # py 3.4+
    def main_native_thread():
        """Return the native main thread object."""
        return __threading__.main_thread() # pylint:disable=no-member
else:
    # Older Pythons: locate the single _MainThread instance by scanning
    # the active-thread registry once at import time.
    _main_threads = [(_k, _v) for _k, _v in __threading__._active.items()
                     if isinstance(_v, __threading__._MainThread)]
    assert len(_main_threads) == 1, "Too many main threads"

    def main_native_thread():
        """Return the native main thread object."""
        return _main_threads[0][1]
|
||||||
|
# Make sure the MainThread can be found by our current greenlet ID,
# otherwise we get a new DummyThread, which cannot be joined.
# Fixes tests in test_threading_2 under PyPy, and generally makes things nicer
# when gevent.threading is imported before monkey patching or not at all
# XXX: This assumes that the import is happening in the "main" greenlet/thread.
# XXX: We should really only be doing this from gevent.monkey.
if _get_ident() not in __threading__._active:
    _v = main_native_thread()
    _k = _v.ident
    # Re-key the main thread entry under the current greenlet's ident.
    del __threading__._active[_k]
    _v._ident = _v._Thread__ident = _get_ident()
    __threading__._active[_get_ident()] = _v
    del _k
    del _v

    # Avoid printing an error on shutdown trying to remove the thread entry
    # we just replaced if we're not fully monkey patched in
    # XXX: This causes a hang on PyPy for some unknown reason (as soon as class _active
    # defines __delitem__, shutdown hangs. Maybe due to something with the GC?
    # XXX: This may be fixed in 2.6.1+
    if not PYPY:
        # pylint:disable=no-member
        _MAIN_THREAD = __threading__._get_ident() if hasattr(__threading__, '_get_ident') else __threading__.get_ident()

        class _active(dict):
            def __delitem__(self, k):
                # Silently ignore deletion of the replaced main-thread key.
                if k == _MAIN_THREAD and k not in self:
                    return
                dict.__delitem__(self, k)

        __threading__._active = _active(__threading__._active)
|
||||||
|
|
||||||
|
import sys
if sys.version_info[:2] >= (3, 4):
    # XXX: Issue 18808 breaks us on Python 3.4.
    # Thread objects now expect a callback from the interpreter itself
    # (threadmodule.c:release_sentinel). Because this never happens
    # when a greenlet exits, join() and friends will block forever.
    # The solution below involves capturing the greenlet when it is
    # started and deferring the known broken methods to it.

    class Thread(__threading__.Thread):
        # The greenlet actually running this thread's target; None
        # before start and again after run() finishes.
        _greenlet = None

        def is_alive(self):
            return bool(self._greenlet)

        isAlive = is_alive

        def _set_tstate_lock(self):
            # Capture the running greenlet in place of the C-level
            # tstate lock the stdlib would normally create here.
            self._greenlet = getcurrent()

        def run(self):
            try:
                super(Thread, self).run()
            finally:
                # avoid ref cycles, but keep in __dict__ so we can
                # distinguish the started/never-started case
                self._greenlet = None
                self._stop() # mark as finished

        def join(self, timeout=None):
            if '_greenlet' not in self.__dict__:
                raise RuntimeError("Cannot join an inactive thread")
            if self._greenlet is None:
                # Already finished; nothing to wait for.
                return
            self._greenlet.join(timeout=timeout)

        def _wait_for_tstate_lock(self, *args, **kwargs):
            # pylint:disable=arguments-differ
            raise NotImplementedError()

    __implements__.append('Thread')

    # The main thread is patched up with more care in monkey.py
    #t = __threading__.current_thread()
    #if isinstance(t, __threading__.Thread):
    #    t.__class__ = Thread
    #    t._greenlet = getcurrent()
|
||||||
|
if sys.version_info[:2] >= (3, 3):
    # Python 3 renamed these; export the public names instead.
    __implements__.remove('_get_ident')
    __implements__.append('get_ident')
    get_ident = _get_ident
    __implements__.remove('_sleep')

    # Python 3 changed the implementation of threading.RLock
    # Previously it was a factory function around threading._RLock
    # which in turn used _allocate_lock. Now, it wants to use
    # threading._CRLock, which is imported from _thread.RLock and as such
    # is implemented in C. So it bypasses our _allocate_lock function.
    # Fortunately they left the Python fallback in place
    assert hasattr(__threading__, '_CRLock'), "Unsupported Python version"
    # Publishing _CRLock = None forces threading to fall back to the
    # pure-Python RLock, which goes through our _allocate_lock.
    _CRLock = None
    __implements__.append('_CRLock')
||||||
498
python/gevent/threadpool.py
Normal file
498
python/gevent/threadpool.py
Normal file
@@ -0,0 +1,498 @@
|
|||||||
|
# Copyright (c) 2012 Denis Bilenko. See LICENSE for details.
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
from gevent._compat import integer_types
|
||||||
|
from gevent.hub import get_hub, getcurrent, sleep, _get_hub
|
||||||
|
from gevent.event import AsyncResult
|
||||||
|
from gevent.greenlet import Greenlet
|
||||||
|
from gevent.pool import GroupMappingMixin
|
||||||
|
from gevent.lock import Semaphore
|
||||||
|
from gevent._threading import Lock, Queue, start_new_thread
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['ThreadPool',
|
||||||
|
'ThreadResult']
|
||||||
|
|
||||||
|
|
||||||
|
class ThreadPool(GroupMappingMixin):
    """
    A pool of native (OS-level) worker threads that cooperates with the
    gevent hub: tasks are handed to workers through a queue, and results
    are delivered back to greenlets as :class:`gevent.event.AsyncResult`
    objects.

    .. note:: The method :meth:`apply_async` will always return a new
       greenlet, bypassing the threadpool entirely.
    """

    def __init__(self, maxsize, hub=None):
        # :param maxsize: non-negative integer bound on worker threads.
        # :param hub: hub that results are delivered to; defaults to the
        #     current hub.
        if hub is None:
            hub = get_hub()
        self.hub = hub
        self._maxsize = 0  # real value installed by _init -> _set_maxsize
        self.manager = None  # greenlet feeding shutdown sentinels; see adjust()
        self.pid = os.getpid()  # used by _on_fork to detect a fork()
        # Watch for fork() so locks and threads can be re-created in the child.
        self.fork_watcher = hub.loop.fork(ref=False)
        self._init(maxsize)

    def _set_maxsize(self, maxsize):
        """Validate and install a new *maxsize*, resizing the slot semaphore."""
        if not isinstance(maxsize, integer_types):
            raise TypeError('maxsize must be integer: %r' % (maxsize, ))
        if maxsize < 0:
            raise ValueError('maxsize must not be negative: %r' % (maxsize, ))
        difference = maxsize - self._maxsize
        # The semaphore counts free task slots; shift it by the delta.
        self._semaphore.counter += difference
        self._maxsize = maxsize
        self.adjust()
        # make sure all currently blocking spawn() start unlocking if maxsize increased
        self._semaphore._start_notify()

    def _get_maxsize(self):
        return self._maxsize

    # The maximum number of worker threads this pool may create (settable).
    maxsize = property(_get_maxsize, _set_maxsize)

    def __repr__(self):
        return '<%s at 0x%x %s/%s/%s>' % (self.__class__.__name__, id(self), len(self), self.size, self.maxsize)

    def __len__(self):
        # XXX just do unfinished_tasks property
        # Number of tasks queued but not yet completed.
        return self.task_queue.unfinished_tasks

    def _get_size(self):
        return self._size

    def _set_size(self, size):
        """Grow or shrink the pool to exactly *size* worker threads."""
        if size < 0:
            raise ValueError('Size of the pool cannot be negative: %r' % (size, ))
        if size > self._maxsize:
            raise ValueError('Size of the pool cannot be bigger than maxsize: %r > %r' % (size, self._maxsize))
        if self.manager:
            self.manager.kill()
        while self._size < size:
            self._add_thread()
        delay = 0.0001
        while self._size > size:
            # A None task is a shutdown sentinel: each one makes one worker exit.
            while self._size - size > self.task_queue.unfinished_tasks:
                self.task_queue.put(None)
            if getcurrent() is self.hub:
                # The hub cannot sleep; let workers drain on their own.
                break
            sleep(delay)
            delay = min(delay * 2, .05)  # exponential backoff, capped at 50ms
        if self._size:
            self.fork_watcher.start(self._on_fork)
        else:
            self.fork_watcher.stop()

    # The current number of worker threads (settable).
    size = property(_get_size, _set_size)

    def _init(self, maxsize):
        # (Re-)create all mutable pool state; also used after fork().
        self._size = 0
        self._semaphore = Semaphore(1)
        self._lock = Lock()
        self.task_queue = Queue()
        self._set_maxsize(maxsize)

    def _on_fork(self):
        # fork() only leaves one thread; also screws up locks;
        # let's re-create locks and threads.
        # NOTE: See comment in gevent.hub.reinit.
        pid = os.getpid()
        if pid != self.pid:
            self.pid = pid
            # Do not mix fork() and threads; since fork() only copies one thread
            # all objects referenced by other threads has refcount that will never
            # go down to 0.
            self._init(self._maxsize)

    def join(self):
        """Waits until all outstanding tasks have been completed."""
        delay = 0.0005
        while self.task_queue.unfinished_tasks > 0:
            sleep(delay)
            delay = min(delay * 2, .05)

    def kill(self):
        """Shut the pool down by shrinking it to zero workers."""
        self.size = 0

    def _adjust_step(self):
        """One non-blocking rebalance of thread count versus demand."""
        # if there is a possibility & necessity for adding a thread, do it
        while self._size < self._maxsize and self.task_queue.unfinished_tasks > self._size:
            self._add_thread()
        # while the number of threads is more than maxsize, kill one
        # we do not check what's already in task_queue - it could be all Nones
        while self._size - self._maxsize > self.task_queue.unfinished_tasks:
            self.task_queue.put(None)
        if self._size:
            self.fork_watcher.start(self._on_fork)
        else:
            self.fork_watcher.stop()

    def _adjust_wait(self):
        # Keep rebalancing until the pool has shrunk back within maxsize.
        delay = 0.0001
        while True:
            self._adjust_step()
            if self._size <= self._maxsize:
                return
            sleep(delay)
            delay = min(delay * 2, .05)

    def adjust(self):
        """Rebalance now; spawn a manager greenlet if shrinking takes time."""
        self._adjust_step()
        if not self.manager and self._size > self._maxsize:
            # might need to feed more Nones into the pool
            self.manager = Greenlet.spawn(self._adjust_wait)

    def _add_thread(self):
        """Start one new worker thread, rolling back the count on failure."""
        with self._lock:
            self._size += 1
        try:
            start_new_thread(self._worker, ())
        except:
            with self._lock:
                self._size -= 1
            raise

    def spawn(self, func, *args, **kwargs):
        """
        Add a new task to the threadpool that will run ``func(*args, **kwargs)``.

        Waits until a slot is available. Creates a new thread if necessary.

        :return: A :class:`gevent.event.AsyncResult`.
        """
        while True:
            # Loop in case _init() replaced the semaphore (e.g. after fork)
            # while we were blocked acquiring the old one.
            semaphore = self._semaphore
            semaphore.acquire()
            if semaphore is self._semaphore:
                break

        thread_result = None
        try:
            task_queue = self.task_queue
            result = AsyncResult()
            # XXX We're calling the semaphore release function in the hub, otherwise
            # we get LoopExit (why?). Previously it was done with a rawlink on the
            # AsyncResult and the comment that it is "competing for order with get(); this is not
            # good, just make ThreadResult release the semaphore before doing anything else"
            thread_result = ThreadResult(result, hub=self.hub, call_when_ready=semaphore.release)
            task_queue.put((func, args, kwargs, thread_result))
            self.adjust()
        except:
            if thread_result is not None:
                thread_result.destroy()
            semaphore.release()
            raise
        return result

    def _decrease_size(self):
        # Guard for interpreter shutdown, when module globals (including
        # ``sys``) may already have been cleared to None.
        if sys is None:
            return
        _lock = getattr(self, '_lock', None)
        if _lock is not None:
            with _lock:
                self._size -= 1

    # If true, each worker destroys its thread-local hub when it exits
    # (enabled by the ThreadPoolExecutor integration).
    _destroy_worker_hub = False

    def _worker(self):
        """Worker-thread main loop: run tasks until a ``None`` sentinel arrives."""
        # pylint:disable=too-many-branches
        need_decrease = True
        try:
            while True:
                task_queue = self.task_queue
                task = task_queue.get()
                try:
                    if task is None:
                        need_decrease = False
                        self._decrease_size()
                        # we want first to decrease size, then decrease unfinished_tasks
                        # otherwise, _adjust might think there's one more idle thread that
                        # needs to be killed
                        return
                    func, args, kwargs, thread_result = task
                    try:
                        value = func(*args, **kwargs)
                    except: # pylint:disable=bare-except
                        # ``sys`` may be None while the interpreter shuts down.
                        exc_info = getattr(sys, 'exc_info', None)
                        if exc_info is None:
                            return
                        thread_result.handle_error((self, func), exc_info())
                    else:
                        if sys is None:
                            return
                        thread_result.set(value)
                        del value
                    finally:
                        # Break reference cycles promptly.
                        del func, args, kwargs, thread_result, task
                finally:
                    if sys is None:
                        return # pylint:disable=lost-exception
                    task_queue.task_done()
        finally:
            if need_decrease:
                self._decrease_size()
            if sys is not None and self._destroy_worker_hub:
                hub = _get_hub()
                if hub is not None:
                    hub.destroy(True)
                del hub

    def apply_e(self, expected_errors, function, args=None, kwargs=None):
        """
        .. deprecated:: 1.1a2
           Identical to :meth:`apply`; the ``expected_errors`` argument is ignored.
        """
        # pylint:disable=unused-argument
        # Deprecated but never documented. In the past, before
        # self.apply() allowed all errors to be raised to the caller,
        # expected_errors allowed a caller to specify a set of errors
        # they wanted to be raised, through the wrap_errors function.
        # In practice, it always took the value Exception or
        # BaseException.
        return self.apply(function, args, kwargs)

    def _apply_immediately(self):
        # If we're being called from a different thread than the one that
        # created us, e.g., because a worker task is trying to use apply()
        # recursively, we have no choice but to run the task immediately;
        # if we try to AsyncResult.get() in the worker thread, it's likely to have
        # nothing to switch to and lead to a LoopExit.
        return get_hub() is not self.hub

    def _apply_async_cb_spawn(self, callback, result):
        # GroupMappingMixin hook: run the callback inline.
        callback(result)

    def _apply_async_use_greenlet(self):
        # Always go to Greenlet because our self.spawn uses threads
        return True
|
||||||
|
|
||||||
|
|
||||||
|
class ThreadResult(object):
    """
    Carries a value (or exception info) produced in a worker thread back
    to *receiver* in the hub's thread, via a loop ``async`` watcher.

    .. note:: The attribute literally named ``async`` below means this
       module can only be parsed by Python <= 3.6; ``async`` became a
       reserved keyword in Python 3.7.
    """

    # Using slots here helps to debug reference cycles/leaks
    __slots__ = ('exc_info', 'async', '_call_when_ready', 'value',
                 'context', 'hub', 'receiver')

    def __init__(self, receiver, hub=None, call_when_ready=None):
        # :param receiver: callable invoked in the hub with this object
        #     once a value or error has been delivered.
        # :param call_when_ready: optional callable (typically the pool
        #     semaphore's ``release``) run in the hub before delivery.
        if hub is None:
            hub = get_hub()
        self.receiver = receiver
        self.hub = hub
        self.context = None
        self.value = None
        self.exc_info = ()
        # The async watcher is the loop primitive used here to signal
        # the hub from another thread (see _ready()).
        self.async = hub.loop.async()
        self._call_when_ready = call_when_ready
        self.async.start(self._on_async)

    @property
    def exception(self):
        # The delivered exception instance, or None on success.
        return self.exc_info[1] if self.exc_info else None

    def _on_async(self):
        # Runs in the hub thread after a worker called _ready().
        self.async.stop()
        if self._call_when_ready:
            # Typically this is pool.semaphore.release and we have to
            # call this in the Hub; if we don't we get the dreaded
            # LoopExit (XXX: Why?)
            self._call_when_ready()
        try:
            if self.exc_info:
                self.hub.handle_error(self.context, *self.exc_info)
            self.context = None
            self.async = None
            self.hub = None
            self._call_when_ready = None
            if self.receiver is not None:
                self.receiver(self)
        finally:
            self.receiver = None
            self.value = None
            if self.exc_info:
                # Drop the traceback to break reference cycles.
                self.exc_info = (self.exc_info[0], self.exc_info[1], None)

    def destroy(self):
        """Tear down the watcher and drop all references without delivering."""
        if self.async is not None:
            self.async.stop()
        self.async = None
        self.context = None
        self.hub = None
        self._call_when_ready = None
        self.receiver = None

    def _ready(self):
        # Wake the hub; safe to call from a worker thread.
        if self.async is not None:
            self.async.send()

    def set(self, value):
        """Called from the worker thread to deliver a successful result."""
        self.value = value
        self._ready()

    def handle_error(self, context, exc_info):
        """Called from the worker thread to deliver an exception."""
        self.context = context
        self.exc_info = exc_info
        self._ready()

    # link protocol:
    def successful(self):
        return self.exception is None
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_errors(errors, function, args, kwargs):
    """
    Invoke ``function(*args, **kwargs)``, trapping the exception types
    listed in *errors*.

    Returns ``(True, result)`` on success, or ``(False, exception)`` when
    one of *errors* was raised. Other exceptions propagate unchanged.

    .. deprecated:: 1.1a2
       Previously used by ThreadPool.apply_e.
    """
    succeeded = True
    try:
        outcome = function(*args, **kwargs)
    except errors as caught:
        succeeded = False
        outcome = caught
    return succeeded, outcome
|
||||||
|
|
||||||
|
try:
    import concurrent.futures
except ImportError:
    # Python 2 without the `futures` backport: no executor integration.
    pass
else:
    __all__.append("ThreadPoolExecutor")

    from gevent.timeout import Timeout as GTimeout
    from gevent._util import Lazy
    from concurrent.futures import _base as cfb

    def _wrap_error(future, fn):
        """Wrap done-callback *fn* so it receives *future* and logs its errors."""
        def cbwrap(_):
            del _
            # we're called with the async result, but
            # be sure to pass in ourself. Also automatically
            # unlink ourself so that we don't get called multiple
            # times.
            try:
                fn(future)
            except Exception: # pylint: disable=broad-except
                future.hub.print_exception((fn, future), *sys.exc_info())
        cbwrap.auto_unlink = True
        return cbwrap

    def _wrap(future, fn):
        """Like _wrap_error, but without the exception handling."""
        def f(_):
            fn(future)
        f.auto_unlink = True
        return f

    class _FutureProxy(object):
        """Adapts a gevent AsyncResult to the concurrent.futures.Future API."""

        def __init__(self, asyncresult):
            self.asyncresult = asyncresult

        # Internal implementation details of a c.f.Future

        @Lazy
        def _condition(self):
            # Lazily created; only usable once threading is cooperative.
            from gevent import monkey
            if monkey.is_module_patched('threading') or self.done():
                import threading
                return threading.Condition()
            # We can only properly work with conditions
            # when we've been monkey-patched. This is necessary
            # for the wait/as_completed module functions.
            raise AttributeError("_condition")

        @Lazy
        def _waiters(self):
            # First access hooks completion notification; see __when_done.
            self.asyncresult.rawlink(self.__when_done)
            return []

        def __when_done(self, _):
            # We should only be called when _waiters has
            # already been accessed.
            waiters = getattr(self, '_waiters')
            for w in waiters: # pylint:disable=not-an-iterable
                if self.successful():
                    w.add_result(self)
                else:
                    w.add_exception(self)

        __when_done.auto_unlink = True

        @property
        def _state(self):
            # Only two states are distinguishable through this proxy.
            if self.done():
                return cfb.FINISHED
            return cfb.RUNNING

        def set_running_or_notify_cancel(self):
            # Does nothing, not even any consistency checks. It's
            # meant to be internal to the executor and we don't use it.
            return

        def result(self, timeout=None):
            """Return the result, translating gevent timeouts to c.f ones."""
            try:
                return self.asyncresult.result(timeout=timeout)
            except GTimeout:
                # XXX: Theoretically this could be a completely
                # unrelated timeout instance. Do we care about that?
                raise concurrent.futures.TimeoutError()

        def exception(self, timeout=None):
            """Return the exception raised by the task, if any."""
            try:
                self.asyncresult.get(timeout=timeout)
                return self.asyncresult.exception
            except GTimeout:
                raise concurrent.futures.TimeoutError()

        def add_done_callback(self, fn):
            """Run *fn* with this future when it completes (now, if done)."""
            if self.done():
                fn(self)
            else:
                self.asyncresult.rawlink(_wrap_error(self, fn))

        def rawlink(self, fn):
            self.asyncresult.rawlink(_wrap(self, fn))

        def __str__(self):
            return str(self.asyncresult)

        def __getattr__(self, name):
            # Everything else (done(), successful(), ...) is delegated.
            return getattr(self.asyncresult, name)

    class ThreadPoolExecutor(concurrent.futures.ThreadPoolExecutor):
        """
        A version of :class:`concurrent.futures.ThreadPoolExecutor` that
        always uses native threads, even when threading is monkey-patched.

        The ``Future`` objects returned from this object can be used
        with gevent waiting primitives like :func:`gevent.wait`.

        .. caution:: If threading is *not* monkey-patched, then the ``Future``
           objects returned by this object are not guaranteed to work with
           :func:`~concurrent.futures.as_completed` and :func:`~concurrent.futures.wait`.
           The individual blocking methods like :meth:`~concurrent.futures.Future.result`
           and :meth:`~concurrent.futures.Future.exception` will always work.

        .. versionadded:: 1.2a1
           This is a provisional API.
        """

        def __init__(self, max_workers):
            super(ThreadPoolExecutor, self).__init__(max_workers)
            # All real work runs on our own ThreadPool, not the parent's.
            self._threadpool = ThreadPool(max_workers)
            self._threadpool._destroy_worker_hub = True

        def submit(self, fn, *args, **kwargs):
            """Schedule ``fn(*args, **kwargs)`` and return a Future-like proxy."""
            with self._shutdown_lock: # pylint:disable=not-context-manager
                if self._shutdown:
                    raise RuntimeError('cannot schedule new futures after shutdown')

                future = self._threadpool.spawn(fn, *args, **kwargs)
                return _FutureProxy(future)

        def shutdown(self, wait=True):
            super(ThreadPoolExecutor, self).shutdown(wait)
            # XXX: We don't implement wait properly
            kill = getattr(self._threadpool, 'kill', None)
            if kill: # pylint:disable=using-constant-test
                self._threadpool.kill()
            self._threadpool = None

        kill = shutdown # greentest compat

        def _adjust_thread_count(self):
            # Does nothing. We don't want to spawn any "threads",
            # let the threadpool handle that.
            pass
|
||||||
261
python/gevent/timeout.py
Normal file
261
python/gevent/timeout.py
Normal file
@@ -0,0 +1,261 @@
|
|||||||
|
# Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Timeouts.
|
||||||
|
|
||||||
|
Many functions in :mod:`gevent` have a *timeout* argument that allows
|
||||||
|
limiting the time the function will block. When that is not available,
|
||||||
|
the :class:`Timeout` class and :func:`with_timeout` function in this
|
||||||
|
module add timeouts to arbitrary code.
|
||||||
|
|
||||||
|
.. warning::
|
||||||
|
|
||||||
|
Timeouts can only work when the greenlet switches to the hub.
|
||||||
|
If a blocking function is called or an intense calculation is ongoing during
|
||||||
|
which no switches occur, :class:`Timeout` is powerless.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from gevent._compat import string_types
|
||||||
|
from gevent.hub import getcurrent, _NONE, get_hub
|
||||||
|
|
||||||
|
__all__ = ['Timeout',
|
||||||
|
'with_timeout']
|
||||||
|
|
||||||
|
|
||||||
|
class _FakeTimer(object):
|
||||||
|
# An object that mimics the API of get_hub().loop.timer, but
|
||||||
|
# without allocating any native resources. This is useful for timeouts
|
||||||
|
# that will never expire.
|
||||||
|
# Also partially mimics the API of Timeout itself for use in _start_new_or_dummy
|
||||||
|
pending = False
|
||||||
|
active = False
|
||||||
|
|
||||||
|
def start(self, *args, **kwargs):
|
||||||
|
# pylint:disable=unused-argument
|
||||||
|
raise AssertionError("non-expiring timer cannot be started")
|
||||||
|
|
||||||
|
def stop(self):
|
||||||
|
return
|
||||||
|
|
||||||
|
def cancel(self):
|
||||||
|
return
|
||||||
|
|
||||||
|
_FakeTimer = _FakeTimer()
|
||||||
|
|
||||||
|
|
||||||
|
class Timeout(BaseException):
    """
    Raise *exception* in the current greenlet after given time period::

        timeout = Timeout(seconds, exception)
        timeout.start()
        try:
            ...  # exception will be raised here, after *seconds* passed since start() call
        finally:
            timeout.cancel()

    .. note:: If the code that the timeout was protecting finishes
       executing before the timeout elapses, be sure to ``cancel`` the
       timeout so it is not unexpectedly raised in the future. Even if
       it is raised, it is a best practice to cancel it. This
       ``try/finally`` construct or a ``with`` statement is a
       recommended pattern.

    When *exception* is omitted or ``None``, the :class:`Timeout` instance itself is raised:

        >>> import gevent
        >>> gevent.Timeout(0.1).start()
        >>> gevent.sleep(0.2) #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
         ...
        Timeout: 0.1 seconds

    To simplify starting and canceling timeouts, the ``with`` statement can be used::

        with gevent.Timeout(seconds, exception) as timeout:
            pass  # ... code block ...

    This is equivalent to the try/finally block above with one additional feature:
    if *exception* is the literal ``False``, the timeout is still raised, but the context manager
    suppresses it, so the code outside the with-block won't see it.

    This is handy for adding a timeout to the functions that don't
    support a *timeout* parameter themselves::

        data = None
        with gevent.Timeout(5, False):
            data = mysock.makefile().readline()
        if data is None:
            ...  # 5 seconds passed without reading a line
        else:
            ...  # a line was read within 5 seconds

    .. caution:: If ``readline()`` above catches and doesn't re-raise :class:`BaseException`
       (for example, with a bare ``except:``), then your timeout will fail to function and control
       won't be returned to you when you expect.

    When catching timeouts, keep in mind that the one you catch may
    not be the one you have set (a calling function may have set its
    own timeout); if you are going to silence a timeout, always check that
    it's the instance you need::

        timeout = Timeout(1)
        timeout.start()
        try:
            ...
        except Timeout as t:
            if t is not timeout:
                raise # not my timeout

    If the *seconds* argument is not given or is ``None`` (e.g.,
    ``Timeout()``), then the timeout will never expire and never raise
    *exception*. This is convenient for creating functions which take
    an optional timeout parameter of their own. (Note that this is not the same thing
    as a *seconds* value of 0.)

    .. caution::
       A *seconds* value less than 0.0 (e.g., -1) is poorly defined. In the future,
       support for negative values is likely to do the same thing as a value
       of ``None``.

    .. versionchanged:: 1.1b2
       If *seconds* is not given or is ``None``, no longer allocate a libev
       timer that will never be started.
    .. versionchanged:: 1.1
       Add warning about negative *seconds* values.
    """

    def __init__(self, seconds=None, exception=None, ref=True, priority=-1, _use_timer=True):
        BaseException.__init__(self)
        self.seconds = seconds
        self.exception = exception
        if seconds is None or not _use_timer:
            # Avoid going through the timer codepath if no timeout is
            # desired; this avoids some CFFI interactions on PyPy that can lead to a
            # RuntimeError if this implementation is used during an `import` statement. See
            # https://bitbucket.org/pypy/pypy/issues/2089/crash-in-pypy-260-linux64-with-gevent-11b1
            # and https://github.com/gevent/gevent/issues/618.
            # Plus, in general, it should be more efficient
            self.timer = _FakeTimer
        else:
            self.timer = get_hub().loop.timer(seconds or 0.0, ref=ref, priority=priority)

    def start(self):
        """Schedule the timeout."""
        assert not self.pending, '%r is already started; to restart it, cancel it first' % self
        if self.seconds is None: # "fake" timeout (never expires)
            return

        if self.exception is None or self.exception is False or isinstance(self.exception, string_types):
            # timeout that raises self
            self.timer.start(getcurrent().throw, self)
        else: # regular timeout with user-provided exception
            self.timer.start(getcurrent().throw, self.exception)

    @classmethod
    def start_new(cls, timeout=None, exception=None, ref=True):
        """Create a started :class:`Timeout`.

        This is a shortcut, the exact action depends on *timeout*'s type:

        * If *timeout* is a :class:`Timeout`, then call its :meth:`start` method
          if it's not already begun.
        * Otherwise, create a new :class:`Timeout` instance, passing (*timeout*, *exception*) as
          arguments, then call its :meth:`start` method.

        Returns the :class:`Timeout` instance.
        """
        if isinstance(timeout, Timeout):
            if not timeout.pending:
                timeout.start()
            return timeout
        timeout = cls(timeout, exception, ref=ref)
        timeout.start()
        return timeout

    @staticmethod
    def _start_new_or_dummy(timeout, exception=None):
        # Internal use only in 1.1
        # Return an object with a 'cancel' method; if timeout is None,
        # this will be a shared instance object that does nothing. Otherwise,
        # return an actual Timeout. Because negative values are hard to reason about,
        # and are often used as sentinels in Python APIs, in the future it's likely
        # that a negative timeout will also return the shared instance.
        # This saves the previously common idiom of 'timer = Timeout.start_new(t) if t is not None else None'
        # followed by 'if timer is not None: timer.cancel()'.
        # That idiom was used to avoid any object allocations.
        # A staticmethod is slightly faster under CPython, compared to a classmethod;
        # under PyPy in synthetic benchmarks it makes no difference.
        if timeout is None:
            return _FakeTimer
        return Timeout.start_new(timeout, exception)

    @property
    def pending(self):
        """Return True if the timeout is scheduled to be raised."""
        return self.timer.pending or self.timer.active

    def cancel(self):
        """If the timeout is pending, cancel it. Otherwise, do nothing."""
        self.timer.stop()

    def __repr__(self):
        classname = type(self).__name__
        if self.pending:
            pending = ' pending'
        else:
            pending = ''
        if self.exception is None:
            exception = ''
        else:
            exception = ' exception=%r' % self.exception
        return '<%s at %s seconds=%s%s%s>' % (classname, hex(id(self)), self.seconds, exception, pending)

    def __str__(self):
        """
        >>> raise Timeout #doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Timeout
        """
        if self.seconds is None:
            return ''

        suffix = '' if self.seconds == 1 else 's'

        if self.exception is None:
            return '%s second%s' % (self.seconds, suffix)
        if self.exception is False:
            return '%s second%s (silent)' % (self.seconds, suffix)
        return '%s second%s: %s' % (self.seconds, suffix, self.exception)

    def __enter__(self):
        # Start on entry unless the caller already started us.
        if not self.pending:
            self.start()
        return self

    def __exit__(self, typ, value, tb):
        self.cancel()
        # Suppress our own exception only in the `exception=False` mode.
        if value is self and self.exception is False:
            return True
|
||||||
|
|
||||||
|
|
||||||
|
def with_timeout(seconds, function, *args, **kwds):
    """
    Call ``function(*args, **kwds)``, bounding the call to *seconds*.

    If the call does not finish in time, it is cancelled. When the
    keyword argument *timeout_value* was supplied, that value is then
    returned; otherwise :class:`Timeout` is raised.

    The *timeout_value* keyword is consumed here and never passed on to
    *function*.
    """
    fallback = kwds.pop("timeout_value", _NONE)
    timer = Timeout.start_new(seconds)
    try:
        return function(*args, **kwds)
    except Timeout as caught:
        # Only swallow *our* timeout, and only when a fallback was given.
        if caught is not timer or fallback is _NONE:
            raise
        return fallback
    finally:
        timer.cancel()
|
||||||
60
python/gevent/util.py
Normal file
60
python/gevent/util.py
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
# Copyright (c) 2009 Denis Bilenko. See LICENSE for details.
|
||||||
|
"""
|
||||||
|
Low-level utilities.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
|
||||||
|
import functools
|
||||||
|
|
||||||
|
__all__ = ['wrap_errors']
|
||||||
|
|
||||||
|
|
||||||
|
class wrap_errors(object):
    """
    Wrap a callable so that the listed exception types are returned
    rather than raised.

    Every exception left unhandled by a greenlet gets logged, so it is
    often desirable to keep anticipated, non-fatal exceptions from
    escaping. Instead of writing the ``try/except`` by hand::

        def wrapped_func(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except (TypeError, ValueError, AttributeError) as ex:
                return ex

    this class does it in one line::

        wrapped_func = wrap_errors((TypeError, ValueError, AttributeError), func)

    ``__str__`` and ``__repr__`` of the original function are preserved.
    """
    # QQQ could also support using wrap_errors as a decorator

    def __init__(self, errors, func):
        """
        :param errors: A :exc:`BaseException` subclass, or a tuple of
            such subclasses, to catch and return as a value.
        :param func: The callable to wrap.
        """
        self._errors = errors
        self._func = func
        # Set __doc__, __wrapped__, etc, especially useful on Python 3.
        functools.update_wrapper(self, func)

    def __call__(self, *args, **kwargs):
        try:
            return self._func(*args, **kwargs)
        except self._errors as ex:
            return ex

    def __str__(self):
        return str(self._func)

    def __repr__(self):
        return repr(self._func)

    def __getattr__(self, name):
        # Anything not defined here is delegated to the wrapped callable.
        return getattr(self._func, name)
|
||||||
98
python/gevent/win32util.py
Normal file
98
python/gevent/win32util.py
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
|
||||||
|
# Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
# a copy of this software and associated documentation files (the
|
||||||
|
# "Software"), to deal in the Software without restriction, including
|
||||||
|
# without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
# permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
# the following conditions:
|
||||||
|
#
|
||||||
|
# The above copyright notice and this permission notice shall be
|
||||||
|
# included in all copies or substantial portions of the Software.
|
||||||
|
#
|
||||||
|
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
"""Error formatting function for Windows.
|
||||||
|
|
||||||
|
The code is taken from twisted.python.win32 module.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['formatError']
|
||||||
|
|
||||||
|
|
||||||
|
class _ErrorFormatter(object):
|
||||||
|
"""
|
||||||
|
Formatter for Windows error messages.
|
||||||
|
|
||||||
|
@ivar winError: A callable which takes one integer error number argument
|
||||||
|
and returns an L{exceptions.WindowsError} instance for that error (like
|
||||||
|
L{ctypes.WinError}).
|
||||||
|
|
||||||
|
@ivar formatMessage: A callable which takes one integer error number
|
||||||
|
argument and returns a C{str} giving the message for that error (like
|
||||||
|
L{win32api.FormatMessage}).
|
||||||
|
|
||||||
|
@ivar errorTab: A mapping from integer error numbers to C{str} messages
|
||||||
|
which correspond to those errors (like L{socket.errorTab}).
|
||||||
|
"""
|
||||||
|
def __init__(self, WinError, FormatMessage, errorTab):
|
||||||
|
self.winError = WinError
|
||||||
|
self.formatMessage = FormatMessage
|
||||||
|
self.errorTab = errorTab
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def fromEnvironment(cls):
|
||||||
|
"""
|
||||||
|
Get as many of the platform-specific error translation objects as
|
||||||
|
possible and return an instance of C{cls} created with them.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from ctypes import WinError
|
||||||
|
except ImportError:
|
||||||
|
WinError = None
|
||||||
|
try:
|
||||||
|
from win32api import FormatMessage
|
||||||
|
except ImportError:
|
||||||
|
FormatMessage = None
|
||||||
|
try:
|
||||||
|
from socket import errorTab
|
||||||
|
except ImportError:
|
||||||
|
errorTab = None
|
||||||
|
return cls(WinError, FormatMessage, errorTab)
|
||||||
|
|
||||||
|
def formatError(self, errorcode):
|
||||||
|
"""
|
||||||
|
Returns the string associated with a Windows error message, such as the
|
||||||
|
ones found in socket.error.
|
||||||
|
|
||||||
|
Attempts direct lookup against the win32 API via ctypes and then
|
||||||
|
pywin32 if available), then in the error table in the socket module,
|
||||||
|
then finally defaulting to C{os.strerror}.
|
||||||
|
|
||||||
|
@param errorcode: the Windows error code
|
||||||
|
@type errorcode: C{int}
|
||||||
|
|
||||||
|
@return: The error message string
|
||||||
|
@rtype: C{str}
|
||||||
|
"""
|
||||||
|
if self.winError is not None:
|
||||||
|
return str(self.winError(errorcode))
|
||||||
|
if self.formatMessage is not None:
|
||||||
|
return self.formatMessage(errorcode)
|
||||||
|
if self.errorTab is not None:
|
||||||
|
result = self.errorTab.get(errorcode)
|
||||||
|
if result is not None:
|
||||||
|
return result
|
||||||
|
return os.strerror(errorcode)
|
||||||
|
|
||||||
|
formatError = _ErrorFormatter.fromEnvironment().formatError
|
||||||
15
python/gevent/wsgi.py
Normal file
15
python/gevent/wsgi.py
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
"""Backwards compatibility alias for :mod:`gevent.pywsgi`.
|
||||||
|
|
||||||
|
In the past, this used libevent's http support, but that was dropped
|
||||||
|
with the introduction of libev. libevent's http support had several
|
||||||
|
limitations, including not supporting stream, not supporting
|
||||||
|
pipelining, and not supporting SSL.
|
||||||
|
|
||||||
|
.. deprecated:: 1.1
|
||||||
|
Use :mod:`gevent.pywsgi`
|
||||||
|
"""
|
||||||
|
|
||||||
|
from gevent.pywsgi import * # pylint:disable=wildcard-import,unused-wildcard-import
|
||||||
|
import gevent.pywsgi as _pywsgi
|
||||||
|
__all__ = _pywsgi.__all__
|
||||||
|
del _pywsgi
|
||||||
BIN
python/python.exe
Normal file
BIN
python/python.exe
Normal file
Binary file not shown.
BIN
python/python3.dll
Normal file
BIN
python/python3.dll
Normal file
Binary file not shown.
6
python/python36._pth
Normal file
6
python/python36._pth
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
python36.zip
|
||||||
|
.
|
||||||
|
..
|
||||||
|
|
||||||
|
# Uncomment to run site.main() automatically
|
||||||
|
#import site
|
||||||
BIN
python/python36.dll
Normal file
BIN
python/python36.dll
Normal file
Binary file not shown.
BIN
python/python36.zip
Normal file
BIN
python/python36.zip
Normal file
Binary file not shown.
BIN
python/pythonw.exe
Normal file
BIN
python/pythonw.exe
Normal file
Binary file not shown.
870
python/socks.py
Normal file
870
python/socks.py
Normal file
@@ -0,0 +1,870 @@
|
|||||||
|
"""SocksiPy - Python SOCKS module.
|
||||||
|
|
||||||
|
Copyright 2006 Dan-Haim. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
1. Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
3. Neither the name of Dan Haim nor the names of his contributors may be used
|
||||||
|
to endorse or promote products derived from this software without specific
|
||||||
|
prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||||
|
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||||
|
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
||||||
|
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||||
|
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
|
||||||
|
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
|
||||||
|
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
This module provides a standard socket-like interface for Python
|
||||||
|
for tunneling connections through SOCKS proxies.
|
||||||
|
|
||||||
|
===============================================================================
|
||||||
|
|
||||||
|
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
|
||||||
|
for use in PyLoris (http://pyloris.sourceforge.net/)
|
||||||
|
|
||||||
|
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
|
||||||
|
mainly to merge bug fixes found in Sourceforge
|
||||||
|
|
||||||
|
Modifications made by Anorov (https://github.com/Anorov)
|
||||||
|
-Forked and renamed to PySocks
|
||||||
|
-Fixed issue with HTTP proxy failure checking (same bug that was in the
|
||||||
|
old ___recvall() method)
|
||||||
|
-Included SocksiPyHandler (sockshandler.py), to be used as a urllib2 handler,
|
||||||
|
courtesy of e000 (https://github.com/e000):
|
||||||
|
https://gist.github.com/869791#file_socksipyhandler.py
|
||||||
|
-Re-styled code to make it readable
|
||||||
|
-Aliased PROXY_TYPE_SOCKS5 -> SOCKS5 etc.
|
||||||
|
-Improved exception handling and output
|
||||||
|
-Removed irritating use of sequence indexes, replaced with tuple unpacked
|
||||||
|
variables
|
||||||
|
-Fixed up Python 3 bytestring handling - chr(0x03).encode() -> b"\x03"
|
||||||
|
-Other general fixes
|
||||||
|
-Added clarification that the HTTP proxy connection method only supports
|
||||||
|
CONNECT-style tunneling HTTP proxies
|
||||||
|
-Various small bug fixes
|
||||||
|
"""
|
||||||
|
|
||||||
|
from base64 import b64encode
|
||||||
|
from collections import Callable
|
||||||
|
from errno import EOPNOTSUPP, EINVAL, EAGAIN
|
||||||
|
import functools
|
||||||
|
from io import BytesIO
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from os import SEEK_CUR
|
||||||
|
import socket
|
||||||
|
import struct
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__version__ = "1.6.7"
|
||||||
|
|
||||||
|
|
||||||
|
if os.name == "nt" and sys.version_info < (3, 0):
|
||||||
|
try:
|
||||||
|
import win_inet_pton
|
||||||
|
except ImportError:
|
||||||
|
raise ImportError(
|
||||||
|
"To run PySocks on Windows you must install win_inet_pton")
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
PROXY_TYPE_SOCKS4 = SOCKS4 = 1
|
||||||
|
PROXY_TYPE_SOCKS5 = SOCKS5 = 2
|
||||||
|
PROXY_TYPE_HTTP = HTTP = 3
|
||||||
|
|
||||||
|
PROXY_TYPES = {"SOCKS4": SOCKS4, "SOCKS5": SOCKS5, "HTTP": HTTP}
|
||||||
|
PRINTABLE_PROXY_TYPES = dict(zip(PROXY_TYPES.values(), PROXY_TYPES.keys()))
|
||||||
|
|
||||||
|
_orgsocket = _orig_socket = socket.socket
|
||||||
|
|
||||||
|
|
||||||
|
def set_self_blocking(function):
|
||||||
|
|
||||||
|
@functools.wraps(function)
|
||||||
|
def wrapper(*args, **kwargs):
|
||||||
|
self = args[0]
|
||||||
|
try:
|
||||||
|
_is_blocking = self.gettimeout()
|
||||||
|
if _is_blocking == 0:
|
||||||
|
self.setblocking(True)
|
||||||
|
return function(*args, **kwargs)
|
||||||
|
except Exception as e:
|
||||||
|
raise
|
||||||
|
finally:
|
||||||
|
# set orgin blocking
|
||||||
|
if _is_blocking == 0:
|
||||||
|
self.setblocking(False)
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
class ProxyError(IOError):
|
||||||
|
"""Socket_err contains original socket.error exception."""
|
||||||
|
def __init__(self, msg, socket_err=None):
|
||||||
|
self.msg = msg
|
||||||
|
self.socket_err = socket_err
|
||||||
|
|
||||||
|
if socket_err:
|
||||||
|
self.msg += ": {0}".format(socket_err)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return self.msg
|
||||||
|
|
||||||
|
|
||||||
|
class GeneralProxyError(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class ProxyConnectionError(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKS5AuthError(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKS5Error(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class SOCKS4Error(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class HTTPError(ProxyError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
SOCKS4_ERRORS = {
|
||||||
|
0x5B: "Request rejected or failed",
|
||||||
|
0x5C: ("Request rejected because SOCKS server cannot connect to identd on"
|
||||||
|
" the client"),
|
||||||
|
0x5D: ("Request rejected because the client program and identd report"
|
||||||
|
" different user-ids")
|
||||||
|
}
|
||||||
|
|
||||||
|
SOCKS5_ERRORS = {
|
||||||
|
0x01: "General SOCKS server failure",
|
||||||
|
0x02: "Connection not allowed by ruleset",
|
||||||
|
0x03: "Network unreachable",
|
||||||
|
0x04: "Host unreachable",
|
||||||
|
0x05: "Connection refused",
|
||||||
|
0x06: "TTL expired",
|
||||||
|
0x07: "Command not supported, or protocol error",
|
||||||
|
0x08: "Address type not supported"
|
||||||
|
}
|
||||||
|
|
||||||
|
DEFAULT_PORTS = {SOCKS4: 1080, SOCKS5: 1080, HTTP: 8080}
|
||||||
|
|
||||||
|
|
||||||
|
def set_default_proxy(proxy_type=None, addr=None, port=None, rdns=True,
|
||||||
|
username=None, password=None):
|
||||||
|
"""Sets a default proxy.
|
||||||
|
|
||||||
|
All further socksocket objects will use the default unless explicitly
|
||||||
|
changed. All parameters are as for socket.set_proxy()."""
|
||||||
|
socksocket.default_proxy = (proxy_type, addr, port, rdns,
|
||||||
|
username.encode() if username else None,
|
||||||
|
password.encode() if password else None)
|
||||||
|
|
||||||
|
|
||||||
|
def setdefaultproxy(*args, **kwargs):
|
||||||
|
if "proxytype" in kwargs:
|
||||||
|
kwargs["proxy_type"] = kwargs.pop("proxytype")
|
||||||
|
return set_default_proxy(*args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def get_default_proxy():
|
||||||
|
"""Returns the default proxy, set by set_default_proxy."""
|
||||||
|
return socksocket.default_proxy
|
||||||
|
|
||||||
|
getdefaultproxy = get_default_proxy
|
||||||
|
|
||||||
|
|
||||||
|
def wrap_module(module):
|
||||||
|
"""Attempts to replace a module's socket library with a SOCKS socket.
|
||||||
|
|
||||||
|
Must set a default proxy using set_default_proxy(...) first. This will
|
||||||
|
only work on modules that import socket directly into the namespace;
|
||||||
|
most of the Python Standard Library falls into this category."""
|
||||||
|
if socksocket.default_proxy:
|
||||||
|
module.socket.socket = socksocket
|
||||||
|
else:
|
||||||
|
raise GeneralProxyError("No default proxy specified")
|
||||||
|
|
||||||
|
wrapmodule = wrap_module
|
||||||
|
|
||||||
|
|
||||||
|
def create_connection(dest_pair,
|
||||||
|
timeout=None, source_address=None,
|
||||||
|
proxy_type=None, proxy_addr=None,
|
||||||
|
proxy_port=None, proxy_rdns=True,
|
||||||
|
proxy_username=None, proxy_password=None,
|
||||||
|
socket_options=None):
|
||||||
|
"""create_connection(dest_pair, *[, timeout], **proxy_args) -> socket object
|
||||||
|
|
||||||
|
Like socket.create_connection(), but connects to proxy
|
||||||
|
before returning the socket object.
|
||||||
|
|
||||||
|
dest_pair - 2-tuple of (IP/hostname, port).
|
||||||
|
**proxy_args - Same args passed to socksocket.set_proxy() if present.
|
||||||
|
timeout - Optional socket timeout value, in seconds.
|
||||||
|
source_address - tuple (host, port) for the socket to bind to as its source
|
||||||
|
address before connecting (only for compatibility)
|
||||||
|
"""
|
||||||
|
# Remove IPv6 brackets on the remote address and proxy address.
|
||||||
|
remote_host, remote_port = dest_pair
|
||||||
|
if remote_host.startswith("["):
|
||||||
|
remote_host = remote_host.strip("[]")
|
||||||
|
if proxy_addr and proxy_addr.startswith("["):
|
||||||
|
proxy_addr = proxy_addr.strip("[]")
|
||||||
|
|
||||||
|
err = None
|
||||||
|
|
||||||
|
# Allow the SOCKS proxy to be on IPv4 or IPv6 addresses.
|
||||||
|
for r in socket.getaddrinfo(proxy_addr, proxy_port, 0, socket.SOCK_STREAM):
|
||||||
|
family, socket_type, proto, canonname, sa = r
|
||||||
|
sock = None
|
||||||
|
try:
|
||||||
|
sock = socksocket(family, socket_type, proto)
|
||||||
|
|
||||||
|
if socket_options:
|
||||||
|
for opt in socket_options:
|
||||||
|
sock.setsockopt(*opt)
|
||||||
|
|
||||||
|
if isinstance(timeout, (int, float)):
|
||||||
|
sock.settimeout(timeout)
|
||||||
|
|
||||||
|
if proxy_type:
|
||||||
|
sock.set_proxy(proxy_type, proxy_addr, proxy_port, proxy_rdns,
|
||||||
|
proxy_username, proxy_password)
|
||||||
|
if source_address:
|
||||||
|
sock.bind(source_address)
|
||||||
|
|
||||||
|
sock.connect((remote_host, remote_port))
|
||||||
|
return sock
|
||||||
|
|
||||||
|
except (socket.error, ProxyConnectionError) as e:
|
||||||
|
err = e
|
||||||
|
if sock:
|
||||||
|
sock.close()
|
||||||
|
sock = None
|
||||||
|
|
||||||
|
if err:
|
||||||
|
raise err
|
||||||
|
|
||||||
|
raise socket.error("gai returned empty list.")
|
||||||
|
|
||||||
|
|
||||||
|
class _BaseSocket(socket.socket):
|
||||||
|
"""Allows Python 2 delegated methods such as send() to be overridden."""
|
||||||
|
def __init__(self, *pos, **kw):
|
||||||
|
_orig_socket.__init__(self, *pos, **kw)
|
||||||
|
|
||||||
|
self._savedmethods = dict()
|
||||||
|
for name in self._savenames:
|
||||||
|
self._savedmethods[name] = getattr(self, name)
|
||||||
|
delattr(self, name) # Allows normal overriding mechanism to work
|
||||||
|
|
||||||
|
_savenames = list()
|
||||||
|
|
||||||
|
|
||||||
|
def _makemethod(name):
|
||||||
|
return lambda self, *pos, **kw: self._savedmethods[name](*pos, **kw)
|
||||||
|
for name in ("sendto", "send", "recvfrom", "recv"):
|
||||||
|
method = getattr(_BaseSocket, name, None)
|
||||||
|
|
||||||
|
# Determine if the method is not defined the usual way
|
||||||
|
# as a function in the class.
|
||||||
|
# Python 2 uses __slots__, so there are descriptors for each method,
|
||||||
|
# but they are not functions.
|
||||||
|
if not isinstance(method, Callable):
|
||||||
|
_BaseSocket._savenames.append(name)
|
||||||
|
setattr(_BaseSocket, name, _makemethod(name))
|
||||||
|
|
||||||
|
|
||||||
|
class socksocket(_BaseSocket):
|
||||||
|
"""socksocket([family[, type[, proto]]]) -> socket object
|
||||||
|
|
||||||
|
Open a SOCKS enabled socket. The parameters are the same as
|
||||||
|
those of the standard socket init. In order for SOCKS to work,
|
||||||
|
you must specify family=AF_INET and proto=0.
|
||||||
|
The "type" argument must be either SOCK_STREAM or SOCK_DGRAM.
|
||||||
|
"""
|
||||||
|
|
||||||
|
default_proxy = None
|
||||||
|
|
||||||
|
def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
|
||||||
|
proto=0, *args, **kwargs):
|
||||||
|
if type not in (socket.SOCK_STREAM, socket.SOCK_DGRAM):
|
||||||
|
msg = "Socket type must be stream or datagram, not {!r}"
|
||||||
|
raise ValueError(msg.format(type))
|
||||||
|
|
||||||
|
super(socksocket, self).__init__(family, type, proto, *args, **kwargs)
|
||||||
|
self._proxyconn = None # TCP connection to keep UDP relay alive
|
||||||
|
|
||||||
|
if self.default_proxy:
|
||||||
|
self.proxy = self.default_proxy
|
||||||
|
else:
|
||||||
|
self.proxy = (None, None, None, None, None, None)
|
||||||
|
self.proxy_sockname = None
|
||||||
|
self.proxy_peername = None
|
||||||
|
|
||||||
|
self._timeout = None
|
||||||
|
|
||||||
|
def _readall(self, file, count):
|
||||||
|
"""Receive EXACTLY the number of bytes requested from the file object.
|
||||||
|
|
||||||
|
Blocks until the required number of bytes have been received."""
|
||||||
|
data = b""
|
||||||
|
while len(data) < count:
|
||||||
|
d = file.read(count - len(data))
|
||||||
|
if not d:
|
||||||
|
raise GeneralProxyError("Connection closed unexpectedly")
|
||||||
|
data += d
|
||||||
|
return data
|
||||||
|
|
||||||
|
def settimeout(self, timeout):
|
||||||
|
self._timeout = timeout
|
||||||
|
try:
|
||||||
|
# test if we're connected, if so apply timeout
|
||||||
|
peer = self.get_proxy_peername()
|
||||||
|
super(socksocket, self).settimeout(self._timeout)
|
||||||
|
except socket.error:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def gettimeout(self):
|
||||||
|
return self._timeout
|
||||||
|
|
||||||
|
def setblocking(self, v):
|
||||||
|
if v:
|
||||||
|
self.settimeout(None)
|
||||||
|
else:
|
||||||
|
self.settimeout(0.0)
|
||||||
|
|
||||||
|
def set_proxy(self, proxy_type=None, addr=None, port=None, rdns=True,
|
||||||
|
username=None, password=None):
|
||||||
|
""" Sets the proxy to be used.
|
||||||
|
|
||||||
|
proxy_type - The type of the proxy to be used. Three types
|
||||||
|
are supported: PROXY_TYPE_SOCKS4 (including socks4a),
|
||||||
|
PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
|
||||||
|
addr - The address of the server (IP or DNS).
|
||||||
|
port - The port of the server. Defaults to 1080 for SOCKS
|
||||||
|
servers and 8080 for HTTP proxy servers.
|
||||||
|
rdns - Should DNS queries be performed on the remote side
|
||||||
|
(rather than the local side). The default is True.
|
||||||
|
Note: This has no effect with SOCKS4 servers.
|
||||||
|
username - Username to authenticate with to the server.
|
||||||
|
The default is no authentication.
|
||||||
|
password - Password to authenticate with to the server.
|
||||||
|
Only relevant when username is also provided."""
|
||||||
|
self.proxy = (proxy_type, addr, port, rdns,
|
||||||
|
username.encode() if username else None,
|
||||||
|
password.encode() if password else None)
|
||||||
|
|
||||||
|
def setproxy(self, *args, **kwargs):
|
||||||
|
if "proxytype" in kwargs:
|
||||||
|
kwargs["proxy_type"] = kwargs.pop("proxytype")
|
||||||
|
return self.set_proxy(*args, **kwargs)
|
||||||
|
|
||||||
|
def bind(self, *pos, **kw):
|
||||||
|
"""Implements proxy connection for UDP sockets.
|
||||||
|
|
||||||
|
Happens during the bind() phase."""
|
||||||
|
(proxy_type, proxy_addr, proxy_port, rdns, username,
|
||||||
|
password) = self.proxy
|
||||||
|
if not proxy_type or self.type != socket.SOCK_DGRAM:
|
||||||
|
return _orig_socket.bind(self, *pos, **kw)
|
||||||
|
|
||||||
|
if self._proxyconn:
|
||||||
|
raise socket.error(EINVAL, "Socket already bound to an address")
|
||||||
|
if proxy_type != SOCKS5:
|
||||||
|
msg = "UDP only supported by SOCKS5 proxy type"
|
||||||
|
raise socket.error(EOPNOTSUPP, msg)
|
||||||
|
super(socksocket, self).bind(*pos, **kw)
|
||||||
|
|
||||||
|
# Need to specify actual local port because
|
||||||
|
# some relays drop packets if a port of zero is specified.
|
||||||
|
# Avoid specifying host address in case of NAT though.
|
||||||
|
_, port = self.getsockname()
|
||||||
|
dst = ("0", port)
|
||||||
|
|
||||||
|
self._proxyconn = _orig_socket()
|
||||||
|
proxy = self._proxy_addr()
|
||||||
|
self._proxyconn.connect(proxy)
|
||||||
|
|
||||||
|
UDP_ASSOCIATE = b"\x03"
|
||||||
|
_, relay = self._SOCKS5_request(self._proxyconn, UDP_ASSOCIATE, dst)
|
||||||
|
|
||||||
|
# The relay is most likely on the same host as the SOCKS proxy,
|
||||||
|
# but some proxies return a private IP address (10.x.y.z)
|
||||||
|
host, _ = proxy
|
||||||
|
_, port = relay
|
||||||
|
super(socksocket, self).connect((host, port))
|
||||||
|
super(socksocket, self).settimeout(self._timeout)
|
||||||
|
self.proxy_sockname = ("0.0.0.0", 0) # Unknown
|
||||||
|
|
||||||
|
def sendto(self, bytes, *args, **kwargs):
|
||||||
|
if self.type != socket.SOCK_DGRAM:
|
||||||
|
return super(socksocket, self).sendto(bytes, *args, **kwargs)
|
||||||
|
if not self._proxyconn:
|
||||||
|
self.bind(("", 0))
|
||||||
|
|
||||||
|
address = args[-1]
|
||||||
|
flags = args[:-1]
|
||||||
|
|
||||||
|
header = BytesIO()
|
||||||
|
RSV = b"\x00\x00"
|
||||||
|
header.write(RSV)
|
||||||
|
STANDALONE = b"\x00"
|
||||||
|
header.write(STANDALONE)
|
||||||
|
self._write_SOCKS5_address(address, header)
|
||||||
|
|
||||||
|
sent = super(socksocket, self).send(header.getvalue() + bytes, *flags,
|
||||||
|
**kwargs)
|
||||||
|
return sent - header.tell()
|
||||||
|
|
||||||
|
def send(self, bytes, flags=0, **kwargs):
|
||||||
|
if self.type == socket.SOCK_DGRAM:
|
||||||
|
return self.sendto(bytes, flags, self.proxy_peername, **kwargs)
|
||||||
|
else:
|
||||||
|
return super(socksocket, self).send(bytes, flags, **kwargs)
|
||||||
|
|
||||||
|
def recvfrom(self, bufsize, flags=0):
|
||||||
|
if self.type != socket.SOCK_DGRAM:
|
||||||
|
return super(socksocket, self).recvfrom(bufsize, flags)
|
||||||
|
if not self._proxyconn:
|
||||||
|
self.bind(("", 0))
|
||||||
|
|
||||||
|
buf = BytesIO(super(socksocket, self).recv(bufsize + 1024, flags))
|
||||||
|
buf.seek(2, SEEK_CUR)
|
||||||
|
frag = buf.read(1)
|
||||||
|
if ord(frag):
|
||||||
|
raise NotImplementedError("Received UDP packet fragment")
|
||||||
|
fromhost, fromport = self._read_SOCKS5_address(buf)
|
||||||
|
|
||||||
|
if self.proxy_peername:
|
||||||
|
peerhost, peerport = self.proxy_peername
|
||||||
|
if fromhost != peerhost or peerport not in (0, fromport):
|
||||||
|
raise socket.error(EAGAIN, "Packet filtered")
|
||||||
|
|
||||||
|
return (buf.read(bufsize), (fromhost, fromport))
|
||||||
|
|
||||||
|
def recv(self, *pos, **kw):
|
||||||
|
bytes, _ = self.recvfrom(*pos, **kw)
|
||||||
|
return bytes
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
if self._proxyconn:
|
||||||
|
self._proxyconn.close()
|
||||||
|
return super(socksocket, self).close()
|
||||||
|
|
||||||
|
def get_proxy_sockname(self):
|
||||||
|
"""Returns the bound IP address and port number at the proxy."""
|
||||||
|
return self.proxy_sockname
|
||||||
|
|
||||||
|
getproxysockname = get_proxy_sockname
|
||||||
|
|
||||||
|
def get_proxy_peername(self):
|
||||||
|
"""
|
||||||
|
Returns the IP and port number of the proxy.
|
||||||
|
"""
|
||||||
|
return self.getpeername()
|
||||||
|
|
||||||
|
getproxypeername = get_proxy_peername
|
||||||
|
|
||||||
|
def get_peername(self):
|
||||||
|
"""Returns the IP address and port number of the destination machine.
|
||||||
|
|
||||||
|
Note: get_proxy_peername returns the proxy."""
|
||||||
|
return self.proxy_peername
|
||||||
|
|
||||||
|
getpeername = get_peername
|
||||||
|
|
||||||
|
def _negotiate_SOCKS5(self, *dest_addr):
|
||||||
|
"""Negotiates a stream connection through a SOCKS5 server."""
|
||||||
|
CONNECT = b"\x01"
|
||||||
|
self.proxy_peername, self.proxy_sockname = self._SOCKS5_request(
|
||||||
|
self, CONNECT, dest_addr)
|
||||||
|
|
||||||
|
def _SOCKS5_request(self, conn, cmd, dst):
|
||||||
|
"""
|
||||||
|
Send SOCKS5 request with given command (CMD field) and
|
||||||
|
address (DST field). Returns resolved DST address that was used.
|
||||||
|
"""
|
||||||
|
proxy_type, addr, port, rdns, username, password = self.proxy
|
||||||
|
|
||||||
|
writer = conn.makefile("wb")
|
||||||
|
reader = conn.makefile("rb", 0) # buffering=0 renamed in Python 3
|
||||||
|
try:
|
||||||
|
# First we'll send the authentication packages we support.
|
||||||
|
if username and password:
|
||||||
|
# The username/password details were supplied to the
|
||||||
|
# set_proxy method so we support the USERNAME/PASSWORD
|
||||||
|
# authentication (in addition to the standard none).
|
||||||
|
writer.write(b"\x05\x02\x00\x02")
|
||||||
|
else:
|
||||||
|
# No username/password were entered, therefore we
|
||||||
|
# only support connections with no authentication.
|
||||||
|
writer.write(b"\x05\x01\x00")
|
||||||
|
|
||||||
|
# We'll receive the server's response to determine which
|
||||||
|
# method was selected
|
||||||
|
writer.flush()
|
||||||
|
chosen_auth = self._readall(reader, 2)
|
||||||
|
|
||||||
|
if chosen_auth[0:1] != b"\x05":
|
||||||
|
# Note: string[i:i+1] is used because indexing of a bytestring
|
||||||
|
# via bytestring[i] yields an integer in Python 3
|
||||||
|
raise GeneralProxyError(
|
||||||
|
"SOCKS5 proxy server sent invalid data")
|
||||||
|
|
||||||
|
# Check the chosen authentication method
|
||||||
|
|
||||||
|
if chosen_auth[1:2] == b"\x02":
|
||||||
|
# Okay, we need to perform a basic username/password
|
||||||
|
# authentication.
|
||||||
|
writer.write(b"\x01" + chr(len(username)).encode()
|
||||||
|
+ username
|
||||||
|
+ chr(len(password)).encode()
|
||||||
|
+ password)
|
||||||
|
writer.flush()
|
||||||
|
auth_status = self._readall(reader, 2)
|
||||||
|
if auth_status[0:1] != b"\x01":
|
||||||
|
# Bad response
|
||||||
|
raise GeneralProxyError(
|
||||||
|
"SOCKS5 proxy server sent invalid data")
|
||||||
|
if auth_status[1:2] != b"\x00":
|
||||||
|
# Authentication failed
|
||||||
|
raise SOCKS5AuthError("SOCKS5 authentication failed")
|
||||||
|
|
||||||
|
# Otherwise, authentication succeeded
|
||||||
|
|
||||||
|
# No authentication is required if 0x00
|
||||||
|
elif chosen_auth[1:2] != b"\x00":
|
||||||
|
# Reaching here is always bad
|
||||||
|
if chosen_auth[1:2] == b"\xFF":
|
||||||
|
raise SOCKS5AuthError(
|
||||||
|
"All offered SOCKS5 authentication methods were"
|
||||||
|
" rejected")
|
||||||
|
else:
|
||||||
|
raise GeneralProxyError(
|
||||||
|
"SOCKS5 proxy server sent invalid data")
|
||||||
|
|
||||||
|
# Now we can request the actual connection
|
||||||
|
writer.write(b"\x05" + cmd + b"\x00")
|
||||||
|
resolved = self._write_SOCKS5_address(dst, writer)
|
||||||
|
writer.flush()
|
||||||
|
|
||||||
|
# Get the response
|
||||||
|
resp = self._readall(reader, 3)
|
||||||
|
if resp[0:1] != b"\x05":
|
||||||
|
raise GeneralProxyError(
|
||||||
|
"SOCKS5 proxy server sent invalid data")
|
||||||
|
|
||||||
|
status = ord(resp[1:2])
|
||||||
|
if status != 0x00:
|
||||||
|
# Connection failed: server returned an error
|
||||||
|
error = SOCKS5_ERRORS.get(status, "Unknown error")
|
||||||
|
raise SOCKS5Error("{0:#04x}: {1}".format(status, error))
|
||||||
|
|
||||||
|
# Get the bound address/port
|
||||||
|
bnd = self._read_SOCKS5_address(reader)
|
||||||
|
|
||||||
|
super(socksocket, self).settimeout(self._timeout)
|
||||||
|
return (resolved, bnd)
|
||||||
|
finally:
|
||||||
|
reader.close()
|
||||||
|
writer.close()
|
||||||
|
|
||||||
|
def _write_SOCKS5_address(self, addr, file):
|
||||||
|
"""
|
||||||
|
Return the host and port packed for the SOCKS5 protocol,
|
||||||
|
and the resolved address as a tuple object.
|
||||||
|
"""
|
||||||
|
host, port = addr
|
||||||
|
proxy_type, _, _, rdns, username, password = self.proxy
|
||||||
|
family_to_byte = {socket.AF_INET: b"\x01", socket.AF_INET6: b"\x04"}
|
||||||
|
|
||||||
|
# If the given destination address is an IP address, we'll
|
||||||
|
# use the IP address request even if remote resolving was specified.
|
||||||
|
# Detect whether the address is IPv4/6 directly.
|
||||||
|
for family in (socket.AF_INET, socket.AF_INET6):
|
||||||
|
try:
|
||||||
|
addr_bytes = socket.inet_pton(family, host)
|
||||||
|
file.write(family_to_byte[family] + addr_bytes)
|
||||||
|
host = socket.inet_ntop(family, addr_bytes)
|
||||||
|
file.write(struct.pack(">H", port))
|
||||||
|
return host, port
|
||||||
|
except socket.error:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Well it's not an IP number, so it's probably a DNS name.
|
||||||
|
if rdns:
|
||||||
|
# Resolve remotely
|
||||||
|
host_bytes = host.encode("idna")
|
||||||
|
file.write(b"\x03" + chr(len(host_bytes)).encode() + host_bytes)
|
||||||
|
else:
|
||||||
|
# Resolve locally
|
||||||
|
addresses = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
|
||||||
|
socket.SOCK_STREAM,
|
||||||
|
socket.IPPROTO_TCP,
|
||||||
|
socket.AI_ADDRCONFIG)
|
||||||
|
# We can't really work out what IP is reachable, so just pick the
|
||||||
|
# first.
|
||||||
|
target_addr = addresses[0]
|
||||||
|
family = target_addr[0]
|
||||||
|
host = target_addr[4][0]
|
||||||
|
|
||||||
|
addr_bytes = socket.inet_pton(family, host)
|
||||||
|
file.write(family_to_byte[family] + addr_bytes)
|
||||||
|
host = socket.inet_ntop(family, addr_bytes)
|
||||||
|
file.write(struct.pack(">H", port))
|
||||||
|
return host, port
|
||||||
|
|
||||||
|
def _read_SOCKS5_address(self, file):
|
||||||
|
atyp = self._readall(file, 1)
|
||||||
|
if atyp == b"\x01":
|
||||||
|
addr = socket.inet_ntoa(self._readall(file, 4))
|
||||||
|
elif atyp == b"\x03":
|
||||||
|
length = self._readall(file, 1)
|
||||||
|
addr = self._readall(file, ord(length))
|
||||||
|
elif atyp == b"\x04":
|
||||||
|
addr = socket.inet_ntop(socket.AF_INET6, self._readall(file, 16))
|
||||||
|
else:
|
||||||
|
raise GeneralProxyError("SOCKS5 proxy server sent invalid data")
|
||||||
|
|
||||||
|
port = struct.unpack(">H", self._readall(file, 2))[0]
|
||||||
|
return addr, port
|
||||||
|
|
||||||
|
def _negotiate_SOCKS4(self, dest_addr, dest_port):
|
||||||
|
"""Negotiates a connection through a SOCKS4 server."""
|
||||||
|
proxy_type, addr, port, rdns, username, password = self.proxy
|
||||||
|
|
||||||
|
writer = self.makefile("wb")
|
||||||
|
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
|
||||||
|
try:
|
||||||
|
# Check if the destination address provided is an IP address
|
||||||
|
remote_resolve = False
|
||||||
|
try:
|
||||||
|
addr_bytes = socket.inet_aton(dest_addr)
|
||||||
|
except socket.error:
|
||||||
|
# It's a DNS name. Check where it should be resolved.
|
||||||
|
if rdns:
|
||||||
|
addr_bytes = b"\x00\x00\x00\x01"
|
||||||
|
remote_resolve = True
|
||||||
|
else:
|
||||||
|
addr_bytes = socket.inet_aton(
|
||||||
|
socket.gethostbyname(dest_addr))
|
||||||
|
|
||||||
|
# Construct the request packet
|
||||||
|
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
|
||||||
|
writer.write(addr_bytes)
|
||||||
|
|
||||||
|
# The username parameter is considered userid for SOCKS4
|
||||||
|
if username:
|
||||||
|
writer.write(username)
|
||||||
|
writer.write(b"\x00")
|
||||||
|
|
||||||
|
# DNS name if remote resolving is required
|
||||||
|
# NOTE: This is actually an extension to the SOCKS4 protocol
|
||||||
|
# called SOCKS4A and may not be supported in all cases.
|
||||||
|
if remote_resolve:
|
||||||
|
writer.write(dest_addr.encode("idna") + b"\x00")
|
||||||
|
writer.flush()
|
||||||
|
|
||||||
|
# Get the response from the server
|
||||||
|
resp = self._readall(reader, 8)
|
||||||
|
if resp[0:1] != b"\x00":
|
||||||
|
# Bad data
|
||||||
|
raise GeneralProxyError(
|
||||||
|
"SOCKS4 proxy server sent invalid data")
|
||||||
|
|
||||||
|
status = ord(resp[1:2])
|
||||||
|
if status != 0x5A:
|
||||||
|
# Connection failed: server returned an error
|
||||||
|
error = SOCKS4_ERRORS.get(status, "Unknown error")
|
||||||
|
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
|
||||||
|
|
||||||
|
# Get the bound address/port
|
||||||
|
self.proxy_sockname = (socket.inet_ntoa(resp[4:]),
|
||||||
|
struct.unpack(">H", resp[2:4])[0])
|
||||||
|
if remote_resolve:
|
||||||
|
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
|
||||||
|
else:
|
||||||
|
self.proxy_peername = dest_addr, dest_port
|
||||||
|
finally:
|
||||||
|
reader.close()
|
||||||
|
writer.close()
|
||||||
|
|
||||||
|
def _negotiate_HTTP(self, dest_addr, dest_port):
    """Negotiates a connection through an HTTP server.

    NOTE: This currently only supports HTTP CONNECT-style proxies."""
    proxy_type, addr, port, rdns, username, password = self.proxy

    # Resolve the destination locally unless remote DNS resolution was
    # requested, in which case the proxy is handed the raw hostname.
    if rdns:
        addr = dest_addr
    else:
        addr = socket.gethostbyname(dest_addr)

    # Assemble the CONNECT request. Entries are joined with CRLF; the
    # trailing b"\r\n" entry produces the blank line that terminates
    # the header block.
    request = [
        (b"CONNECT " + addr.encode("idna") + b":"
         + str(dest_port).encode() + b" HTTP/1.1"),
        b"Host: " + dest_addr.encode("idna")
    ]

    # NOTE(review): username/password are used as bytes here (b":" join,
    # b64encode) — presumably set_proxy stores them encoded; confirm.
    if username and password:
        request.append(b"Proxy-Authorization: basic "
                       + b64encode(username + b":" + password))

    request.append(b"\r\n")

    self.sendall(b"\r\n".join(request))

    # Only the status line is needed to know whether CONNECT succeeded;
    # any remaining response headers are left unread on the socket.
    fobj = self.makefile()
    status_line = fobj.readline()
    fobj.close()

    if not status_line:
        raise GeneralProxyError("Connection closed unexpectedly")

    # A valid status line has three space-separated fields:
    # protocol, numeric code, reason phrase.
    fields = status_line.split(" ", 2)
    if len(fields) != 3:
        raise GeneralProxyError("HTTP proxy server sent invalid response")
    proto, status_code, status_msg = fields

    if not proto.startswith("HTTP/"):
        raise GeneralProxyError(
            "Proxy server does not appear to be an HTTP proxy")

    try:
        status_code = int(status_code)
    except ValueError:
        raise HTTPError(
            "HTTP proxy server did not return a valid HTTP status")

    if status_code != 200:
        error = "{0}: {1}".format(status_code, status_msg)
        if status_code in (400, 403, 405):
            # These statuses commonly mean the proxy rejected the CONNECT
            # method itself rather than the destination address.
            error += ("\n[*] Note: The HTTP proxy server may not be"
                      " supported by PySocks (must be a CONNECT tunnel"
                      " proxy)")
        raise HTTPError(error)

    # HTTP proxies do not report a bound address, so record a placeholder
    # sockname and the (possibly locally resolved) peer we tunneled to.
    self.proxy_sockname = (b"0.0.0.0", 0)
    self.proxy_peername = addr, dest_port
|
||||||
|
|
||||||
|
# Dispatch table mapping a proxy-type constant to its negotiation routine.
# connect() looks up the handler here after the TCP connection to the proxy
# server is established. The values are plain functions (not bound methods),
# so callers invoke them as negotiate(self, dest_addr, dest_port).
_proxy_negotiators = {
    SOCKS4: _negotiate_SOCKS4,
    SOCKS5: _negotiate_SOCKS5,
    HTTP: _negotiate_HTTP
}
|
||||||
|
|
||||||
|
@set_self_blocking
def connect(self, dest_pair):
    """
    Connects to the specified destination through a proxy.
    Uses the same API as socket's connect().
    To select the proxy server, use set_proxy().

    dest_pair - 2-tuple of (IP/hostname, port).

    Raises socket.error for IPv6 destinations, GeneralProxyError for
    invalid input or negotiation failures, and ProxyConnectionError when
    the proxy server itself cannot be reached.
    """
    if len(dest_pair) != 2 or dest_pair[0].startswith("["):
        # Probably IPv6, not supported -- raise an error, and hope
        # Happy Eyeballs (RFC6555) makes sure at least the IPv4
        # connection works...
        raise socket.error("PySocks doesn't support IPv6: %s"
                           % str(dest_pair))

    dest_addr, dest_port = dest_pair

    # UDP sockets are handled separately: there is no CONNECT handshake,
    # only bookkeeping of the intended peer address.
    if self.type == socket.SOCK_DGRAM:
        # NOTE(review): _proxyconn appears to be set by a bind()
        # elsewhere in this class — confirm; binding here sets up the
        # UDP relay on first use.
        if not self._proxyconn:
            self.bind(("", 0))
        dest_addr = socket.gethostbyname(dest_addr)

        # If the host address is INADDR_ANY or similar, reset the peer
        # address so that packets are received from any peer
        if dest_addr == "0.0.0.0" and not dest_port:
            self.proxy_peername = None
        else:
            self.proxy_peername = (dest_addr, dest_port)
        return

    (proxy_type, proxy_addr, proxy_port, rdns, username,
        password) = self.proxy

    # Do a minimal input check first
    if (not isinstance(dest_pair, (list, tuple))
            or len(dest_pair) != 2
            or not dest_addr
            or not isinstance(dest_port, int)):
        # Inputs failed, raise an error
        raise GeneralProxyError(
            "Invalid destination-connection (host, port) pair")

    # We set the timeout here so that we don't hang in connection or during
    # negotiation.
    super(socksocket, self).settimeout(self._timeout)

    if proxy_type is None:
        # Treat like regular socket object
        self.proxy_peername = dest_pair
        # NOTE(review): this second settimeout call is redundant (the
        # same timeout was applied just above) but harmless.
        super(socksocket, self).settimeout(self._timeout)
        super(socksocket, self).connect((dest_addr, dest_port))
        return

    # Resolve the proxy's (host, port), applying the protocol's default
    # port when none was configured.
    proxy_addr = self._proxy_addr()

    try:
        # Initial connection to proxy server.
        super(socksocket, self).connect(proxy_addr)

    except socket.error as error:
        # Error while connecting to proxy
        self.close()
        proxy_addr, proxy_port = proxy_addr
        proxy_server = "{0}:{1}".format(proxy_addr, proxy_port)
        printable_type = PRINTABLE_PROXY_TYPES[proxy_type]

        msg = "Error connecting to {0} proxy {1}".format(printable_type,
                                                         proxy_server)
        log.debug("%s due to: %s", msg, error)
        raise ProxyConnectionError(msg, error)

    else:
        # Connected to proxy server, now negotiate
        try:
            # Calls negotiate_{SOCKS4, SOCKS5, HTTP}
            negotiate = self._proxy_negotiators[proxy_type]
            negotiate(self, dest_addr, dest_port)
        except socket.error as error:
            # Wrap socket errors
            self.close()
            raise GeneralProxyError("Socket error", error)
        except ProxyError:
            # Protocol error while negotiating with proxy
            self.close()
            raise
|
||||||
|
|
||||||
|
def _proxy_addr(self):
    """Return the (host, port) tuple of the configured proxy server.

    Falls back to the protocol's default port when no port was set
    explicitly; raises GeneralProxyError when no port can be determined
    (i.e. the proxy type has no entry in DEFAULT_PORTS).
    """
    proxy_type = self.proxy[0]
    host = self.proxy[1]
    port = self.proxy[2]
    if not port:
        port = DEFAULT_PORTS.get(proxy_type)
    if not port:
        raise GeneralProxyError("Invalid proxy type")
    return host, port
|
||||||
BIN
python/sqlite3.dll
Normal file
BIN
python/sqlite3.dll
Normal file
Binary file not shown.
BIN
python/vcruntime140.dll
Normal file
BIN
python/vcruntime140.dll
Normal file
Binary file not shown.
Reference in New Issue
Block a user